Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c | 73
1 file changed, 73 insertions, 0 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c5257316f4b9..6b186750e9be 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -108,6 +108,79 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cwq->lock, flags);
+ /*
+ * We need to re-validate the work info after we've gotten
+ * the cpu_workqueue lock. We can run the work now iff:
+ *
+ * - the wq_data still matches the cpu_workqueue_struct
+ * - AND the work is still marked pending
+ * - AND the work is still on a list (which will be this
+ * workqueue_struct list)
+ *
+ * All these conditions are important, because we
+ * need to protect against the work being run right
+ * now on another CPU (all but the last one might be
+ * true if it's currently running and has not been
+ * released yet, for example).
+ */
+ if (get_wq_data(work) == cwq
+ && work_pending(work)
+ && !list_empty(&work->entry)) {
+ work_func_t f = work->func;
+ list_del_init(&work->entry);
+ spin_unlock_irqrestore(&cwq->lock, flags);
+
+ if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+ work_release(work);
+ f(work);
+
+ spin_lock_irqsave(&cwq->lock, flags);
+ cwq->remove_sequence++;
+ wake_up(&cwq->work_done);
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&cwq->lock, flags);
+ return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks whether the work is still pending, and runs it
+ * synchronously if so.  It returns 1 if it ran the work and
+ * 0 if there was no scheduled work to run.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work does not point at the per-CPU workqueue
+ * struct until the timer has fired, and can still change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+ for (;;) {
+ struct cpu_workqueue_struct *cwq;
+
+ if (!work_pending(work))
+ return 0;
+ if (list_empty(&work->entry))
+ return 0;
+ /* NOTE! This depends intimately on __queue_work! */
+ cwq = get_wq_data(work);
+ if (!cwq)
+ return 0;
+ if (__run_work(cwq, work))
+ return 1;
+ }
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
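
For illustration, here is a minimal hypothetical caller of the new export,
sketched against the 2.6.20-era API this patch targets (work handlers take
a struct work_struct * and DECLARE_WORK() takes the handler directly).
The names frobnicate_handler, frobnicate_work and frobnicate_teardown are
invented for this sketch and are not part of the commit:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void frobnicate_handler(struct work_struct *work)
{
	/* deferred processing for the (hypothetical) frobnicate device */
}

static DECLARE_WORK(frobnicate_work, frobnicate_handler);

static void frobnicate_teardown(void)
{
	/*
	 * Make sure any pending instance of the handler has run before
	 * we tear down the state it touches.  If the work is still
	 * queued, run_scheduled_work() pulls it off its workqueue list
	 * and runs it right here; if it has already run (or was never
	 * queued), it returns 0 and there is nothing left to do.
	 */
	if (!run_scheduled_work(&frobnicate_work))
		printk(KERN_DEBUG "frobnicate work was not pending\n");
}

Note the restriction from the comment above: this only applies to work
queued with schedule_work() or queue_work(), never to delayed work, whose
wq data does not point at a cpu_workqueue_struct until its timer fires.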