/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>

#include "loop_counter.h"
int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			dp_name(dp), MAX_LOOPS);
	actions->actions_len = 0;
	return -ELOOP;
}
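
/*
 * Illustrative sketch of a call site (not part of this file): it assumes
 * struct loop_counter exposes the count/looping fields from loop_counter.h,
 * that dp, skb and acts are caller state, and that do_execute_actions() is
 * a stand-in for the real work done between get and put.
 *
 *	struct loop_counter *loop = loop_get_counter();
 *	int error = 0;
 *
 *	if (unlikely(++loop->count > MAX_LOOPS))
 *		loop->looping = true;
 *	if (unlikely(loop->looping))
 *		error = loop_suppress(dp, acts);
 *	else
 *		error = do_execute_actions(dp, skb, acts);
 *
 *	if (!--loop->count)
 *		loop->looping = false;
 *	loop_put_counter();
 */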

#ifndef CONFIG_PREEMPT_RT

/* We use a separate counter for each CPU for both interrupt and non-interrupt
 * context in order to keep the limit deterministic for a given packet.
 */
struct percpu_loop_counters {
	struct loop_counter counters[2];
};

static DEFINE_PER_CPU(struct percpu_loop_counters, loop_counters);
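
/* Returns the counter for the current execution context: counters[0] in
 * process context, counters[1] in interrupt context.  get_cpu_var()
 * disables preemption, so every loop_get_counter() must be paired with a
 * loop_put_counter(). */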
struct loop_counter *loop_get_counter(void)
{
	return &get_cpu_var(loop_counters).counters[!!in_interrupt()];
}

void loop_put_counter(void)
{
	put_cpu_var(loop_counters);
}

#else /* !CONFIG_PREEMPT_RT */

struct loop_counter *loop_get_counter(void)
{
	WARN_ON(in_interrupt());

	/* Only two bits of the extra_flags field in struct task_struct are
	 * used and it's an unsigned int.  We hijack the most significant bits
	 * to be our counter structure.  On RT kernels softirqs always run in
	 * process context so we are guaranteed to have a valid task_struct.
	 */
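
	/* On little-endian machines the most significant bytes of extra_flags
	 * sit at its highest addresses, so taking the address one unsigned
	 * int past the field and backing up sizeof(struct loop_counter)
	 * places the counter over those bytes. */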

#ifdef __LITTLE_ENDIAN
	return (void *)(&current->extra_flags + 1) -
	       sizeof(struct loop_counter);
#elif defined(__BIG_ENDIAN)
	return (struct loop_counter *)&current->extra_flags;
#else
#error "Please fix <asm/byteorder.h>."
#endif
}
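
/* On RT the counter lives in the current task_struct, so there is no
 * per-CPU variable to release and put is a no-op. */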
void loop_put_counter(void) { }

#endif /* CONFIG_PREEMPT_RT */