-rw-r--r--	drivers/net/phy/phy.c	3
-rw-r--r--	include/linux/workqueue.h	1
-rw-r--r--	kernel/workqueue.c	73
3 files changed, 75 insertions(+), 2 deletions(-)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 4044bb1ada8..e175f3910b1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -587,8 +587,7 @@ int phy_stop_interrupts(struct phy_device *phydev)
 	 * Finish any pending work; we might have been scheduled
 	 * to be called from keventd ourselves, though.
 	 */
-	if (!current_is_keventd())
-		flush_scheduled_work();
+	run_scheduled_work(&phydev->phy_queue);
 
 	free_irq(phydev->irq, phydev);
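
Aside (not part of the patch): flush_scheduled_work() waits for everything on the
shared keventd queue, which deadlocks if the caller is itself running from keventd;
that is what the old current_is_keventd() guard was for. run_scheduled_work() runs
only the one item, synchronously, so it is safe in either context. A minimal sketch
of the resulting teardown pattern, with my_dev and irq_work as hypothetical names:

	static void my_dev_teardown(struct my_dev *dev)
	{
		/*
		 * Run our own pending work item now (safe even from
		 * keventd); returns 1 if it was pending and ran, 0 if
		 * there was nothing to run.
		 */
		run_scheduled_work(&dev->irq_work);
		free_irq(dev->irq, dev);
	}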
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f0cb1df7b47..edef8d50b26 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -162,6 +162,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 extern int FASTCALL(schedule_work(struct work_struct *work));
+extern int FASTCALL(run_scheduled_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
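
Aside (not part of the patch): per the kerneldoc in kernel/workqueue.c below, the new
helper only works for plain work_structs queued with schedule_work(), never for
delayed work. A minimal usage sketch, with my_work and my_work_fn as hypothetical
names:

	static void my_work_fn(struct work_struct *work)
	{
		/* the deferred work itself */
	}
	static DECLARE_WORK(my_work, my_work_fn);

	/* queue it as usual ... */
	schedule_work(&my_work);

	/* ... later: run it synchronously if it is still pending */
	if (!run_scheduled_work(&my_work))
		printk(KERN_DEBUG "my_work was not pending\n");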
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c5257316f4b..6b186750e9b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -108,6 +108,79 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cwq->lock, flags);
+	/*
+	 * We need to re-validate the work info after we've gotten
+	 * the cpu_workqueue lock. We can run the work now iff:
+	 *
+	 *  - the wq_data still matches the cpu_workqueue_struct
+	 *  - AND the work is still marked pending
+	 *  - AND the work is still on a list (which will be this
+	 *    workqueue_struct list)
+	 *
+	 * All these conditions are important, because we
+	 * need to protect against the work being run right
+	 * now on another CPU (all but the last one might be
+	 * true if it's currently running and has not been
+	 * released yet, for example).
+	 */
+	if (get_wq_data(work) == cwq
+	    && work_pending(work)
+	    && !list_empty(&work->entry)) {
+		work_func_t f = work->func;
+		list_del_init(&work->entry);
+		spin_unlock_irqrestore(&cwq->lock, flags);
+
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
+
+		spin_lock_irqsave(&cwq->lock, flags);
+		cwq->remove_sequence++;
+		wake_up(&cwq->work_done);
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&cwq->lock, flags);
+	return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+	for (;;) {
+		struct cpu_workqueue_struct *cwq;
+
+		if (!work_pending(work))
+			return 0;
+		if (list_empty(&work->entry))
+			return 0;
+		/* NOTE! This depends intimately on __queue_work! */
+		cwq = get_wq_data(work);
+		if (!cwq)
+			return 0;
+		if (__run_work(cwq, work))
+			return 1;
+	}
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
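
Aside (not part of the patch): run_scheduled_work() loops because its unlocked checks
can go stale; the work may be requeued onto a different CPU's queue between
get_wq_data() and __run_work() taking cwq->lock, in which case __run_work() sees the
mismatch, returns 0, and the loop retries against the new queue. A simplified
user-space analogue of that lock-then-revalidate pattern (all names hypothetical,
using pthreads in place of cwq->lock):

	#include <pthread.h>

	struct queue { pthread_mutex_t lock; };
	struct item {
		struct queue *owner;		/* which queue holds it */
		int pending;			/* queued but not yet run */
		void (*fn)(struct item *);
	};

	int try_run(struct item *it)
	{
		for (;;) {
			struct queue *q = it->owner;	/* unlocked snapshot */

			if (!q || !it->pending)
				return 0;		/* nothing to run */
			pthread_mutex_lock(&q->lock);
			if (it->owner == q && it->pending) {
				it->pending = 0;	/* claim it under the lock */
				pthread_mutex_unlock(&q->lock);
				it->fn(it);		/* run outside the lock */
				return 1;
			}
			/* it moved to another queue: retry with the new owner */
			pthread_mutex_unlock(&q->lock);
		}
	}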