/*
 * Derived from the kernel/workqueue.c
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.
 */
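/*
 * Usage sketch (illustrative only, not part of this file; the work item
 * and function names below are hypothetical):
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_work, my_work_fn);
 *
 *	schedule_delayed_work(&my_work, HZ);	// run ~1 second from now
 *	cancel_delayed_work_sync(&my_work);	// on teardown; waits if running
 */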
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/version.h>	/* LINUX_VERSION_CODE, KERNEL_VERSION */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
static spinlock_t wq_lock;		/* Protects workq and current_work. */
static struct list_head workq;		/* Pending work items, FIFO order. */
static wait_queue_head_t more_work;	/* Worker sleeps here when idle. */
static struct task_struct *workq_thread;
static struct work_struct *current_work; /* Item the worker is running now. */
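/*
 * All work items are run by a single kernel thread ("ovs_workq"), so at
 * most one work function executes at a time, and items run in the order
 * they were queued.
 */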
static void queue_work(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_lock, flags);
	list_add_tail(&work->entry, &workq);
	wake_up(&more_work);
	spin_unlock_irqrestore(&wq_lock, flags);
}
static void _delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;

	queue_work(&dwork->work);
}
static void __queue_delayed_work(struct delayed_work *dwork,
				 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	BUG_ON(timer_pending(timer));
	BUG_ON(!list_empty(&work->entry));

	timer->expires = jiffies + delay;
	timer->data = (unsigned long)dwork;
	timer->function = _delayed_work_timer_fn;

	add_timer(timer);
}
int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	if (test_and_set_bit(WORK_STRUCT_PENDING,
			     work_data_bits(&dwork->work)))
		return 0;	/* Already pending; nothing to do. */

	if (delay == 0)
		queue_work(&dwork->work);
	else
		__queue_delayed_work(dwork, delay);

	return 1;
}
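/*
 * To flush a work item that may currently be executing, a barrier item
 * is queued at the head of the list, directly behind the running item,
 * and the caller sleeps until the worker thread runs the barrier and
 * signals its completion.
 */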
struct wq_barrier {
	struct work_struct work;
	struct completion done;
};
static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	complete(&barr->done);
}
static void workqueue_barrier(struct work_struct *work)
{
	bool need_barrier;
	struct wq_barrier barr;

	spin_lock_irq(&wq_lock);
	if (current_work != work) {
		need_barrier = false;
	} else {
		INIT_WORK(&barr.work, wq_barrier_func);
		init_completion(&barr.done);
		list_add(&barr.work.entry, &workq);
		wake_up(&more_work);
		need_barrier = true;
	}
	spin_unlock_irq(&wq_lock);

	if (need_barrier)
		wait_for_completion(&barr.done);
}
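/*
 * Try to steal a PENDING work item before the worker runs it.  Returns 1
 * if the item was removed from the queue, 0 if it was not pending at all,
 * and -1 if the worker already picked it up (the caller should retry).
 */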
static int try_to_grab_pending(struct work_struct *work)
{
	int ret;

	BUG_ON(in_interrupt());
	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	spin_lock_irq(&wq_lock);
	if (!list_empty(&work->entry)) {
		list_del_init(&work->entry);
		ret = 1;
	} else {
		/* Already executed, retry. */
		ret = -1;
	}
	spin_unlock_irq(&wq_lock);
	return ret;
}
static int __cancel_work_timer(struct work_struct *work,
			       struct timer_list *timer)
{
	int ret;

	for (;;) {
		ret = (timer && likely(del_timer(timer)));
		if (ret) /* Was active timer, return true. */
			break;

		/* Inactive timer case */
		ret = try_to_grab_pending(work);
		if (ret >= 0)
			break;
	}
	workqueue_barrier(work);
	work_clear_pending(work);
	return ret;
}
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
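/*
 * Drain the queue: pop each item with the lock held, then drop the lock
 * while its function runs, so work functions may queue further work.
 */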
static void run_workqueue(void)
{
	spin_lock_irq(&wq_lock);
	while (!list_empty(&workq)) {
		struct work_struct *work = list_entry(workq.next,
				struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(workq.next);
		current_work = work;
		spin_unlock_irq(&wq_lock);

		work_clear_pending(work);
		f(work);

		BUG_ON(in_interrupt());
		spin_lock_irq(&wq_lock);
		current_work = NULL;
	}
	spin_unlock_irq(&wq_lock);
}
static int worker_thread(void *dummy)
{
	for (;;) {
		wait_event_interruptible(more_work,
				(kthread_should_stop() || !list_empty(&workq)));
		if (kthread_should_stop())
			break;
		run_workqueue();
	}
	return 0;
}
int __init ovs_workqueues_init(void)
{
	spin_lock_init(&wq_lock);
	INIT_LIST_HEAD(&workq);
	init_waitqueue_head(&more_work);

	workq_thread = kthread_create(worker_thread, NULL, "ovs_workq");
	if (IS_ERR(workq_thread))
		return PTR_ERR(workq_thread);

	wake_up_process(workq_thread);
	return 0;
}
void ovs_workqueues_exit(void)
{
	BUG_ON(!list_empty(&workq));
	kthread_stop(workq_thread);
}
#endif /* kernel < 2.6.23 */