1 #ifndef __LINUX_WORKQUEUE_WRAPPER_H
2 #define __LINUX_WORKQUEUE_WRAPPER_H 1
4 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5 #include_next <linux/workqueue.h>
6 static inline int __init ovs_workqueues_init(void) { return 0; }
7 static inline void ovs_workqueues_exit(void) {}
10 #include <linux/timer.h>
12 int __init ovs_workqueues_init(void);
13 void ovs_workqueues_exit(void);
/* Older kernels have an implementation of work queues with some very bad
 * characteristics when trying to cancel work (potential deadlocks, use after
 * free, etc.).  Therefore we implement a simple OVS-specific work queue
 * using a single worker thread.  The work-queue API is kept similar for
 * compatibility. */

/* Work item callback signature; matches the upstream work_func_t. */
typedef void (*work_func_t)(struct work_struct *work);
25 #define work_data_bits(work) ((unsigned long *)(&(work)->data))
28 #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
30 struct list_head entry;
34 #define WORK_DATA_INIT() ATOMIC_LONG_INIT(0)
36 #define work_clear_pending(work) \
37 clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
40 struct work_struct work;
41 struct timer_list timer;
44 #define __WORK_INITIALIZER(n, f) { \
45 .data = WORK_DATA_INIT(), \
46 .entry = { &(n).entry, &(n).entry }, \
50 #define __DELAYED_WORK_INITIALIZER(n, f) { \
51 .work = __WORK_INITIALIZER((n).work, (f)), \
52 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
55 #define DECLARE_DELAYED_WORK(n, f) \
56 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
/* Map the upstream names onto the OVS replacement implementations; the
 * macro rewrites the prototype below, so it actually declares the rpl_
 * symbol and callers written against the stock API link against it. */
#define schedule_delayed_work rpl_schedule_delayed_work
int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);

#define cancel_delayed_work_sync rpl_cancel_delayed_work_sync
int cancel_delayed_work_sync(struct delayed_work *dwork);
64 #define INIT_WORK(_work, _func) \
66 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
67 INIT_LIST_HEAD(&(_work)->entry); \
68 (_work)->func = (_func); \
71 #endif /* kernel version < 2.6.23 */