datapath: Avoid system freeze due to ovs-flow-rehash softlockup.
openvswitch: datapath/linux/compat/workqueue.c
/*
 * Derived from kernel/workqueue.c
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.
 *
 */
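
/*
 * All work items run on a single dedicated kernel thread ("ovs_workq")
 * rather than on the shared system workqueue, so OVS work (e.g. the
 * ovs-flow-rehash job) can neither stall nor be stalled by unrelated
 * deferred work.  One global list, lock and wait queue suffice because
 * there is exactly one worker.
 */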

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

static spinlock_t wq_lock;                /* Protects workq and current_work. */
static struct list_head workq;            /* Pending work items. */
static wait_queue_head_t more_work;       /* Worker sleeps here when idle. */
static struct task_struct *workq_thread;  /* The single worker kthread. */
static struct work_struct *current_work;  /* Item being executed, or NULL. */

/*
 * Append WORK to the queue and wake the worker.  Irqsave locking is
 * needed because this is also called from the delayed-work timer
 * callback, which runs in softirq context.
 */
static void queue_work(struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_lock, flags);
        list_add_tail(&work->entry, &workq);
        wake_up(&more_work);
        spin_unlock_irqrestore(&wq_lock, flags);
}

/* Timer callback for delayed work; fires in softirq context. */
static void _delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        queue_work(&dwork->work);
}

static void __queue_delayed_work(struct delayed_work *dwork,
                unsigned long delay)
{
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        BUG_ON(timer_pending(timer));
        BUG_ON(!list_empty(&work->entry));

        timer->expires = jiffies + delay;
        timer->data = (unsigned long)dwork;
        timer->function = _delayed_work_timer_fn;

        add_timer(timer);
}

/*
 * Schedule DWORK to run after DELAY jiffies.  Returns 0 if the work
 * was already pending, 1 otherwise.
 */
int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
        if (test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(&dwork->work)))
                return 0;

        if (delay == 0)
                queue_work(&dwork->work);
        else
                __queue_delayed_work(dwork, delay);

        return 1;
}

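/*
 * Typical caller-side usage, sketched with hypothetical names (the
 * handler "my_rehash" and the one-second period are illustrative
 * only, not taken from this file):
 *
 *      static void my_rehash(struct work_struct *work);
 *      static DECLARE_DELAYED_WORK(my_work, my_rehash);
 *
 *      schedule_delayed_work(&my_work, msecs_to_jiffies(1000));
 *      ...
 *      cancel_delayed_work_sync(&my_work);     (on shutdown)
 */
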
/*
 * A barrier is a dummy work item whose only job is to signal a
 * completion, letting a canceling thread wait for the work item that
 * is currently executing to finish.
 */
struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

/*
 * If WORK is executing right now, insert a barrier at the head of the
 * queue (so it runs immediately after the in-flight item) and wait for
 * it; otherwise there is nothing to wait for.  The on-stack barrier is
 * safe because we block until the worker has completed it.
 */
static void workqueue_barrier(struct work_struct *work)
{
        bool need_barrier;
        struct wq_barrier barr;

        spin_lock_irq(&wq_lock);
        if (current_work != work) {
                need_barrier = false;
        } else {
                INIT_WORK(&barr.work, wq_barrier_func);
                init_completion(&barr.done);
                list_add(&barr.work.entry, &workq);
                wake_up(&more_work);
                need_barrier = true;
        }
        spin_unlock_irq(&wq_lock);

        if (need_barrier)
                wait_for_completion(&barr.done);
}

/*
 * Try to take ownership of WORK.  Returns 0 on success: either the
 * work was idle and we claimed its pending bit, or it was still queued
 * and we unlinked it.  Returns -1 if the work has already been taken
 * off the queue for execution (or its timer is firing concurrently),
 * in which case the caller must retry.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        int ret;

        BUG_ON(in_interrupt());

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 0;

        spin_lock_irq(&wq_lock);
        if (!list_empty(&work->entry)) {
                list_del_init(&work->entry);
                ret = 0;
        } else {
                /* The worker or the timer has claimed it; retry. */
                ret = -1;
        }
        spin_unlock_irq(&wq_lock);

        return ret;
}

/*
 * Cancel WORK: kill its timer if one is armed, otherwise steal the
 * work from the queue, retrying until one of the two succeeds.  Then
 * wait out any invocation that is already running before clearing the
 * pending bit.  Returns 1 if an armed timer was deleted, 0 otherwise.
 */
static int __cancel_work_timer(struct work_struct *work,
                               struct timer_list *timer)
{
        int ret;

        for (;;) {
                ret = (timer && likely(del_timer(timer)));
                if (ret) /* An armed timer was deleted. */
                        break;

                /* Timer was idle; try to take the work itself. */
                ret = try_to_grab_pending(work);
                if (!ret)
                        break;
        }
        workqueue_barrier(work);
        work_clear_pending(work);
        return ret;
}

int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}

/*
 * Drain the queue.  The lock is dropped around each callback, and the
 * pending bit is cleared before the callback runs, so a handler may
 * requeue itself.  current_work lets workqueue_barrier() detect an
 * in-flight item.
 */
static void run_workqueue(void)
{
        spin_lock_irq(&wq_lock);
        while (!list_empty(&workq)) {
                struct work_struct *work = list_entry(workq.next,
                                struct work_struct, entry);

                work_func_t f = work->func;
                list_del_init(workq.next);
                current_work = work;
                spin_unlock_irq(&wq_lock);

                work_clear_pending(work);
                f(work);

                BUG_ON(in_interrupt());
                spin_lock_irq(&wq_lock);
                current_work = NULL;
        }
        spin_unlock_irq(&wq_lock);
}

static int worker_thread(void *dummy)
{
        for (;;) {
                wait_event_interruptible(more_work,
                                (kthread_should_stop() || !list_empty(&workq)));

                if (kthread_should_stop())
                        break;

                run_workqueue();
        }

        return 0;
}

int __init ovs_workqueues_init(void)
{
        spin_lock_init(&wq_lock);
        INIT_LIST_HEAD(&workq);
        init_waitqueue_head(&more_work);

        workq_thread = kthread_create(worker_thread, NULL, "ovs_workq");
        if (IS_ERR(workq_thread))
                return PTR_ERR(workq_thread);

        wake_up_process(workq_thread);
        return 0;
}

void ovs_workqueues_exit(void)
{
        BUG_ON(!list_empty(&workq));
        kthread_stop(workq_thread);
}
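
/*
 * A module using this compat layer would bracket its lifetime with the
 * two hooks above, along these lines (hypothetical caller, sketch
 * only):
 *
 *      err = ovs_workqueues_init();
 *      if (err)
 *              return err;
 *      ...
 *      ovs_workqueues_exit();
 */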