/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/if_ether.h>
11 #include <linux/if_vlan.h>
12 #include <net/llc_pdu.h>
14 #include <linux/jiffies.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/tcp.h>
18 #include <linux/udp.h>
20 #include <linux/rcupdate.h>
/* Slab cache from which every struct sw_flow is allocated
 * (see the allocation in flow_alloc() and the kmem_cache_create /
 * kmem_cache_destroy calls at the bottom of this file). */
struct kmem_cache *flow_cache;
29 /* Internal function used to compare fields in flow. */
31 int flow_fields_match(const struct sw_flow_key *a, const struct sw_flow_key *b,
32 uint32_t w, uint32_t src_mask, uint32_t dst_mask)
34 return ((w & OFPFW_IN_PORT || a->in_port == b->in_port)
35 && (w & OFPFW_DL_VLAN || a->dl_vlan == b->dl_vlan)
36 && (w & OFPFW_DL_SRC || !memcmp(a->dl_src, b->dl_src, ETH_ALEN))
37 && (w & OFPFW_DL_DST || !memcmp(a->dl_dst, b->dl_dst, ETH_ALEN))
38 && (w & OFPFW_DL_TYPE || a->dl_type == b->dl_type)
39 && !((a->nw_src ^ b->nw_src) & src_mask)
40 && !((a->nw_dst ^ b->nw_dst) & dst_mask)
41 && (w & OFPFW_NW_PROTO || a->nw_proto == b->nw_proto)
42 && (w & OFPFW_TP_SRC || a->tp_src == b->tp_src)
43 && (w & OFPFW_TP_DST || a->tp_dst == b->tp_dst));
46 /* Returns nonzero if 'a' and 'b' match, that is, if their fields are equal
47 * modulo wildcards in 'b', zero otherwise. */
48 int flow_matches_1wild(const struct sw_flow_key *a,
49 const struct sw_flow_key *b)
51 return flow_fields_match(a, b, b->wildcards,
52 b->nw_src_mask, b->nw_dst_mask);
54 EXPORT_SYMBOL(flow_matches_1wild);
56 /* Returns nonzero if 'a' and 'b' match, that is, if their fields are equal
57 * modulo wildcards in 'a' or 'b', zero otherwise. */
58 int flow_matches_2wild(const struct sw_flow_key *a,
59 const struct sw_flow_key *b)
61 return flow_fields_match(a, b,
62 a->wildcards | b->wildcards,
63 a->nw_src_mask & b->nw_src_mask,
64 a->nw_dst_mask & b->nw_dst_mask);
66 EXPORT_SYMBOL(flow_matches_2wild);
68 /* Returns nonzero if 't' (the table entry's key) and 'd' (the key
69 * describing the match) match, that is, if their fields are
70 * equal modulo wildcards, zero otherwise. If 'strict' is nonzero, the
71 * wildcards must match in both 't_key' and 'd_key'. Note that the
72 * table's wildcards are ignored unless 'strict' is set. */
73 int flow_matches_desc(const struct sw_flow_key *t, const struct sw_flow_key *d,
76 if (strict && d->wildcards != t->wildcards)
78 return flow_matches_1wild(t, d);
80 EXPORT_SYMBOL(flow_matches_desc);
82 static uint32_t make_nw_mask(int n_wild_bits)
84 n_wild_bits &= (1u << OFPFW_NW_SRC_BITS) - 1;
85 return n_wild_bits < 32 ? htonl(~((1u << n_wild_bits) - 1)) : 0;
88 void flow_extract_match(struct sw_flow_key* to, const struct ofp_match* from)
90 to->wildcards = ntohl(from->wildcards) & OFPFW_ALL;
92 to->in_port = from->in_port;
93 to->dl_vlan = from->dl_vlan;
94 memcpy(to->dl_src, from->dl_src, ETH_ALEN);
95 memcpy(to->dl_dst, from->dl_dst, ETH_ALEN);
96 to->dl_type = from->dl_type;
98 to->nw_src = to->nw_dst = to->nw_proto = 0;
99 to->tp_src = to->tp_dst = 0;
101 #define OFPFW_TP (OFPFW_TP_SRC | OFPFW_TP_DST)
102 #define OFPFW_NW (OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK | OFPFW_NW_PROTO)
103 if (to->wildcards & OFPFW_DL_TYPE) {
104 /* Can't sensibly match on network or transport headers if the
105 * data link type is unknown. */
106 to->wildcards |= OFPFW_NW | OFPFW_TP;
107 } else if (from->dl_type == htons(ETH_P_IP)) {
108 to->nw_src = from->nw_src;
109 to->nw_dst = from->nw_dst;
110 to->nw_proto = from->nw_proto;
112 if (to->wildcards & OFPFW_NW_PROTO) {
113 /* Can't sensibly match on transport headers if the
114 * network protocol is unknown. */
115 to->wildcards |= OFPFW_TP;
116 } else if (from->nw_proto == IPPROTO_TCP
117 || from->nw_proto == IPPROTO_UDP) {
118 to->tp_src = from->tp_src;
119 to->tp_dst = from->tp_dst;
121 /* Transport layer fields are undefined. Mark them as
122 * exact-match to allow such flows to reside in
123 * table-hash, instead of falling into table-linear. */
124 to->wildcards &= ~OFPFW_TP;
127 /* Network and transport layer fields are undefined. Mark them
128 * as exact-match to allow such flows to reside in table-hash,
129 * instead of falling into table-linear. */
130 to->wildcards &= ~(OFPFW_NW | OFPFW_TP);
133 /* We set these late because code above adjusts to->wildcards. */
134 to->nw_src_mask = make_nw_mask(to->wildcards >> OFPFW_NW_SRC_SHIFT);
135 to->nw_dst_mask = make_nw_mask(to->wildcards >> OFPFW_NW_DST_SHIFT);
138 void flow_fill_match(struct ofp_match* to, const struct sw_flow_key* from)
140 to->wildcards = htonl(from->wildcards);
141 to->in_port = from->in_port;
142 to->dl_vlan = from->dl_vlan;
143 memcpy(to->dl_src, from->dl_src, ETH_ALEN);
144 memcpy(to->dl_dst, from->dl_dst, ETH_ALEN);
145 to->dl_type = from->dl_type;
146 to->nw_src = from->nw_src;
147 to->nw_dst = from->nw_dst;
148 to->nw_proto = from->nw_proto;
149 to->tp_src = from->tp_src;
150 to->tp_dst = from->tp_dst;
154 int flow_timeout(struct sw_flow *flow)
156 if (flow->idle_timeout != OFP_FLOW_PERMANENT
157 && time_after(jiffies, flow->used + flow->idle_timeout * HZ))
158 return OFPER_IDLE_TIMEOUT;
159 else if (flow->hard_timeout != OFP_FLOW_PERMANENT
160 && time_after(jiffies,
161 flow->init_time + flow->hard_timeout * HZ))
162 return OFPER_HARD_TIMEOUT;
166 EXPORT_SYMBOL(flow_timeout);
168 /* Allocates and returns a new flow with 'n_actions' action, using allocation
169 * flags 'flags'. Returns the new flow or a null pointer on failure. */
170 struct sw_flow *flow_alloc(int n_actions, gfp_t flags)
172 struct sw_flow_actions *sfa;
173 int size = sizeof *sfa + (n_actions * sizeof sfa->actions[0]);
174 struct sw_flow *flow = kmem_cache_alloc(flow_cache, flags);
178 sfa = kmalloc(size, flags);
179 if (unlikely(!sfa)) {
180 kmem_cache_free(flow_cache, flow);
183 sfa->n_actions = n_actions;
189 /* Frees 'flow' immediately. */
190 void flow_free(struct sw_flow *flow)
194 kfree(flow->sf_acts);
195 kmem_cache_free(flow_cache, flow);
197 EXPORT_SYMBOL(flow_free);
199 /* RCU callback used by flow_deferred_free. */
200 static void rcu_free_flow_callback(struct rcu_head *rcu)
202 struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
206 /* Schedules 'flow' to be freed after the next RCU grace period.
207 * The caller must hold rcu_read_lock for this to be sensible. */
208 void flow_deferred_free(struct sw_flow *flow)
210 call_rcu(&flow->rcu, rcu_free_flow_callback);
212 EXPORT_SYMBOL(flow_deferred_free);
214 /* RCU callback used by flow_deferred_free_acts. */
215 static void rcu_free_acts_callback(struct rcu_head *rcu)
217 struct sw_flow_actions *sf_acts = container_of(rcu,
218 struct sw_flow_actions, rcu);
222 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
223 * The caller must hold rcu_read_lock for this to be sensible. */
224 void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
226 call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
228 EXPORT_SYMBOL(flow_deferred_free_acts);
230 /* Copies 'actions' into a newly allocated structure for use by 'flow'
231 * and safely frees the structure that defined the previous actions. */
232 void flow_replace_acts(struct sw_flow *flow, const struct ofp_action *actions,
235 struct sw_flow_actions *sfa;
236 struct sw_flow_actions *orig_sfa = flow->sf_acts;
237 int size = sizeof *sfa + (n_actions * sizeof sfa->actions[0]);
239 sfa = kmalloc(size, GFP_ATOMIC);
243 sfa->n_actions = n_actions;
244 memcpy(sfa->actions, actions, n_actions * sizeof sfa->actions[0]);
246 rcu_assign_pointer(flow->sf_acts, sfa);
247 flow_deferred_free_acts(orig_sfa);
251 EXPORT_SYMBOL(flow_replace_acts);
253 /* Prints a representation of 'key' to the kernel log. */
254 void print_flow(const struct sw_flow_key *key)
256 printk("wild%08x port%04x:vlan%04x mac%02x:%02x:%02x:%02x:%02x:%02x"
257 "->%02x:%02x:%02x:%02x:%02x:%02x "
258 "proto%04x ip%u.%u.%u.%u->%u.%u.%u.%u port%d->%d\n",
259 key->wildcards, ntohs(key->in_port), ntohs(key->dl_vlan),
260 key->dl_src[0], key->dl_src[1], key->dl_src[2],
261 key->dl_src[3], key->dl_src[4], key->dl_src[5],
262 key->dl_dst[0], key->dl_dst[1], key->dl_dst[2],
263 key->dl_dst[3], key->dl_dst[4], key->dl_dst[5],
265 ((unsigned char *)&key->nw_src)[0],
266 ((unsigned char *)&key->nw_src)[1],
267 ((unsigned char *)&key->nw_src)[2],
268 ((unsigned char *)&key->nw_src)[3],
269 ((unsigned char *)&key->nw_dst)[0],
270 ((unsigned char *)&key->nw_dst)[1],
271 ((unsigned char *)&key->nw_dst)[2],
272 ((unsigned char *)&key->nw_dst)[3],
273 ntohs(key->tp_src), ntohs(key->tp_dst));
275 EXPORT_SYMBOL(print_flow);
277 static int tcphdr_ok(struct sk_buff *skb)
279 int th_ofs = skb_transport_offset(skb);
280 if (skb->len >= th_ofs + sizeof(struct tcphdr)) {
281 int tcp_len = tcp_hdrlen(skb);
282 return (tcp_len >= sizeof(struct tcphdr)
283 && skb->len >= th_ofs + tcp_len);
288 static int udphdr_ok(struct sk_buff *skb)
290 int th_ofs = skb_transport_offset(skb);
291 return skb->len >= th_ofs + sizeof(struct udphdr);
294 /* Parses the Ethernet frame in 'skb', which was received on 'in_port',
295 * and initializes 'key' to match. Returns 1 if 'skb' contains an IP
296 * fragment, 0 otherwise. */
297 int flow_extract(struct sk_buff *skb, uint16_t in_port,
298 struct sw_flow_key *key)
304 key->in_port = htons(in_port);
307 key->nw_src_mask = 0;
308 key->nw_dst_mask = 0;
310 /* This code doesn't check that skb->len is long enough to contain the
311 * MAC or network header. With a 46-byte minimum length frame this
312 * assumption is always correct. */
314 /* Doesn't verify checksums. Should it? */
316 /* Data link layer. We only support Ethernet. */
318 nh_ofs = sizeof(struct ethhdr);
319 if (likely(ntohs(mac->h_proto) >= OFP_DL_TYPE_ETH2_CUTOFF)) {
320 /* This is an Ethernet II frame */
321 key->dl_type = mac->h_proto;
323 /* This is an 802.2 frame */
324 if (snap_get_ethertype(skb, &key->dl_type) != -EINVAL) {
325 nh_ofs += sizeof(struct snap_hdr);
327 key->dl_type = htons(OFP_DL_TYPE_NOT_ETH_TYPE);
328 nh_ofs += sizeof(struct llc_pdu_un);
332 /* Check for a VLAN tag */
333 if (likely(key->dl_type != htons(ETH_P_8021Q))) {
334 key->dl_vlan = htons(OFP_VLAN_NONE);
336 struct vlan_hdr *vh = (struct vlan_hdr *)(skb_mac_header(skb) + nh_ofs);
337 key->dl_type = vh->h_vlan_encapsulated_proto;
338 key->dl_vlan = vh->h_vlan_TCI & htons(VLAN_VID_MASK);
339 nh_ofs += sizeof(*vh);
341 memcpy(key->dl_src, mac->h_source, ETH_ALEN);
342 memcpy(key->dl_dst, mac->h_dest, ETH_ALEN);
343 skb_set_network_header(skb, nh_ofs);
346 if (likely(key->dl_type == htons(ETH_P_IP))) {
347 struct iphdr *nh = ip_hdr(skb);
348 key->nw_src = nh->saddr;
349 key->nw_dst = nh->daddr;
350 key->nw_proto = nh->protocol;
351 th_ofs = nh_ofs + nh->ihl * 4;
352 skb_set_transport_header(skb, th_ofs);
354 /* Transport layer. */
355 if (!(nh->frag_off & htons(IP_MF | IP_OFFSET))) {
356 if (key->nw_proto == IPPROTO_TCP) {
357 if (tcphdr_ok(skb)) {
358 struct tcphdr *tcp = tcp_hdr(skb);
359 key->tp_src = tcp->source;
360 key->tp_dst = tcp->dest;
362 /* Avoid tricking other code into
363 * thinking that this packet has an L4
367 } else if (key->nw_proto == IPPROTO_UDP) {
368 if (udphdr_ok(skb)) {
369 struct udphdr *udp = udp_hdr(skb);
370 key->tp_src = udp->source;
371 key->tp_dst = udp->dest;
373 /* Avoid tricking other code into
374 * thinking that this packet has an L4
401 /* Initializes the flow module.
402 * Returns zero if successful or a negative error code. */
405 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
407 if (flow_cache == NULL)
413 /* Uninitializes the flow module. */
416 kmem_cache_destroy(flow_cache);