/*
 * Derived from kernel/workqueue.c
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.
 */
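
/*
 * Example usage (a sketch; my_work and my_work_fn are caller-supplied):
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static struct delayed_work my_work;
 *
 *	INIT_DELAYED_WORK(&my_work, my_work_fn);
 *	schedule_delayed_work(&my_work, msecs_to_jiffies(100));
 *	...
 *	cancel_delayed_work_sync(&my_work);
 */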

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
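
/*
 * A single worker thread services one global list of work items.  All of
 * the state below is protected by wq_lock; current_work records the item
 * the worker is currently executing so that a barrier can wait for it.
 */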
static spinlock_t wq_lock;
static struct list_head workq;
static wait_queue_head_t more_work;
static struct task_struct *workq_thread;
static struct work_struct *current_work;

static void queue_work(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_lock, flags);
	list_add_tail(&work->entry, &workq);
	wake_up(&more_work);
	spin_unlock_irqrestore(&wq_lock, flags);
}
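
/*
 * Timer callback for delayed work: once the delay expires, move the work
 * item onto the run queue.
 */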
static void _delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	queue_work(&dwork->work);
}

static void __queue_delayed_work(struct delayed_work *dwork,
				 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	BUG_ON(timer_pending(timer));
	BUG_ON(!list_empty(&work->entry));

	timer->expires = jiffies + delay;
	timer->data = (unsigned long)dwork;
	timer->function = _delayed_work_timer_fn;
	add_timer(timer);
}

int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	if (test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(&dwork->work)))
		return 0;

	if (!delay)
		queue_work(&dwork->work);
	else
		__queue_delayed_work(dwork, delay);

	return 1;
}
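
/*
 * Barrier support: to wait for a work item that the worker thread is
 * currently executing, queue a dummy work item behind it and sleep until
 * that dummy item completes.
 */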
struct wq_barrier {
	struct work_struct work;
	struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void workqueue_barrier(struct work_struct *work)
{
	bool need_barrier;
	struct wq_barrier barr;

	spin_lock_irq(&wq_lock);
	if (current_work != work)
		need_barrier = false;
	else {
		INIT_WORK(&barr.work, wq_barrier_func);
		init_completion(&barr.done);
		list_add(&barr.work.entry, &workq);
		wake_up(&more_work);
		need_barrier = true;
	}
	spin_unlock_irq(&wq_lock);

	if (need_barrier)
		wait_for_completion(&barr.done);
}
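
/*
 * Try to steal a pending work item off the queue.  Returns 1 if the item
 * was removed from the list, 0 if it was not pending at all, and a
 * negative value if it is currently executing and the caller must retry.
 */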
static int try_to_grab_pending(struct work_struct *work)
{
	int ret;

	BUG_ON(in_interrupt());

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	spin_lock_irq(&wq_lock);
	if (!list_empty(&work->entry)) {
		list_del_init(&work->entry);
		ret = 1;
	} else
		/* Already executed, retry. */
		ret = -1;
	spin_unlock_irq(&wq_lock);

	return ret;
}

static int __cancel_work_timer(struct work_struct *work,
			       struct timer_list *timer)
{
	int ret;

	for (;;) {
		ret = (timer && likely(del_timer(timer)));
		if (ret) /* Was active timer, return true. */
			break;

		/* Inactive timer case */
		ret = try_to_grab_pending(work);
		if (ret >= 0)
			break;
	}
	workqueue_barrier(work);
	work_clear_pending(work);
	return ret;
}

int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
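
/*
 * Pop work items off the queue one at a time, dropping wq_lock while each
 * handler runs.  current_work is set while a handler executes so that
 * workqueue_barrier() can detect it.
 */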
static void run_workqueue(void)
{
	spin_lock_irq(&wq_lock);
	while (!list_empty(&workq)) {
		struct work_struct *work = list_entry(workq.next,
				struct work_struct, entry);

		work_func_t f = work->func;
		list_del_init(workq.next);
		current_work = work;
		spin_unlock_irq(&wq_lock);

		work_clear_pending(work);
		f(work);

		BUG_ON(in_interrupt());
		spin_lock_irq(&wq_lock);
		current_work = NULL;
	}
	spin_unlock_irq(&wq_lock);
}

static int worker_thread(void *dummy)
{
	for (;;) {
		wait_event_interruptible(more_work,
				(kthread_should_stop() || !list_empty(&workq)));

		if (kthread_should_stop())
			break;

		run_workqueue();
	}

	return 0;
}
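
/* Set up the queue state and start the single worker thread. */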
int __init ovs_workqueues_init(void)
{
	spin_lock_init(&wq_lock);
	INIT_LIST_HEAD(&workq);
	init_waitqueue_head(&more_work);

	workq_thread = kthread_create(worker_thread, NULL, "ovs_workq");
	if (IS_ERR(workq_thread))
		return PTR_ERR(workq_thread);

	wake_up_process(workq_thread);
	return 0;
}

void ovs_workqueues_exit(void)
{
	BUG_ON(!list_empty(&workq));
	kthread_stop(workq_thread);
}