-rw-r--r--	include/linux/workqueue.h |  1 +
-rw-r--r--	kernel/workqueue.c        | 32 +++++++++++++++++++++++++++++---
2 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0a7814131e66..07cf5e5f91cb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -225,6 +225,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
 	WQ_FREEZEABLE		= 1 << 0, /* freeze during suspend */
 	WQ_SINGLE_CPU		= 1 << 1, /* only single cpu at a time */
+	WQ_NON_REENTRANT	= 1 << 2, /* guarantee non-reentrance */
 };
 
 extern struct workqueue_struct *
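Usage sketch (illustration only, not part of this commit): a driver whose work function must never run concurrently with itself on another CPU can pass the new flag when creating its workqueue. The snippet below uses alloc_workqueue(), which is introduced slightly later in the same series; at this point in the tree the flag would instead be passed through the __create_workqueue() path. my_wq, my_work and my_work_fn are placeholder names.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* with WQ_NON_REENTRANT, never runs concurrently with itself */
}

static DECLARE_WORK(my_work, my_work_fn);
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	/* max_active of 1 is an arbitrary choice for the example */
	my_wq = alloc_workqueue("my_nr_wq", WQ_NON_REENTRANT, 1);
	if (!my_wq)
		return -ENOMEM;

	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");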
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c68277c204ab..bce1074bdec1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	debug_work_activate(work);
 
-	/* determine gcwq to use */
+	/*
+	 * Determine gcwq to use.  SINGLE_CPU is inherently
+	 * NON_REENTRANT, so test it first.
+	 */
 	if (!(wq->flags & WQ_SINGLE_CPU)) {
-		/* just use the requested cpu for multicpu workqueues */
+		struct global_cwq *last_gcwq;
+
+		/*
+		 * It's multi cpu.  If @wq is non-reentrant and @work
+		 * was previously on a different cpu, it might still
+		 * be running there, in which case the work needs to
+		 * be queued on that cpu to guarantee non-reentrance.
+		 */
 		gcwq = get_gcwq(cpu);
-		spin_lock_irqsave(&gcwq->lock, flags);
+		if (wq->flags & WQ_NON_REENTRANT &&
+		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+			struct worker *worker;
+
+			spin_lock_irqsave(&last_gcwq->lock, flags);
+
+			worker = find_worker_executing_work(last_gcwq, work);
+
+			if (worker && worker->current_cwq->wq == wq)
+				gcwq = last_gcwq;
+			else {
+				/* meh... not running there, queue here */
+				spin_unlock_irqrestore(&last_gcwq->lock, flags);
+				spin_lock_irqsave(&gcwq->lock, flags);
+			}
+		} else
+			spin_lock_irqsave(&gcwq->lock, flags);
 	} else {
 		unsigned int req_cpu = cpu;
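To make the queueing decision above easier to follow, here is a small userspace toy model (all toy_* names are invented; this is a sketch of the control flow, not kernel code and not this commit's API). If the workqueue is non-reentrant and the work item is still being executed for the same workqueue on the gcwq where it last ran, the work is queued behind it there; otherwise it goes to the requested CPU's gcwq. The real code additionally takes last_gcwq->lock before inspecting the running workers and drops it again if the work turns out not to be running there.

#include <stdio.h>

struct toy_wq { unsigned int flags; };
struct toy_gcwq { int cpu; };
struct toy_work {
	struct toy_gcwq *last_gcwq;	/* gcwq the work last ran on, if known */
	struct toy_wq *running_for;	/* wq it is still executing for there, or NULL */
};

#define TOY_WQ_NON_REENTRANT	(1 << 2)

/* Mirrors the decision in __queue_work(): prefer the requested CPU's gcwq
 * unless the work may still be running elsewhere for the same workqueue. */
static struct toy_gcwq *pick_gcwq(struct toy_wq *wq, struct toy_work *work,
				  struct toy_gcwq *req_gcwq)
{
	struct toy_gcwq *last = work->last_gcwq;

	if ((wq->flags & TOY_WQ_NON_REENTRANT) &&
	    last && last != req_gcwq && work->running_for == wq)
		return last;		/* still running there: queue behind it */

	return req_gcwq;		/* not running elsewhere: queue as requested */
}

int main(void)
{
	struct toy_wq wq = { .flags = TOY_WQ_NON_REENTRANT };
	struct toy_gcwq cpu0 = { .cpu = 0 }, cpu1 = { .cpu = 1 };
	struct toy_work work = { .last_gcwq = &cpu1, .running_for = &wq };

	/* requeue targeting CPU 0 while the work still runs on CPU 1 */
	printf("queued on cpu %d\n", pick_gcwq(&wq, &work, &cpu0)->cpu);	/* -> cpu 1 */
	return 0;
}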