break;
case ODPAT_CONTROLLER:
- err = output_control(dp, skb, nla_get_u32(a));
+ err = output_control(dp, skb, nla_get_u64(a));
if (err) {
kfree_skb(skb);
return err;
break;
case ODPAT_SET_TUNNEL:
- OVS_CB(skb)->tun_id = nla_get_be32(a);
+ OVS_CB(skb)->tun_id = nla_get_be64(a);
break;
case ODPAT_SET_DL_TCI:
/* Append each packet in 'skb' list to 'queue'. There will be only one packet
* unless we broke up a GSO packet. */
static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
- int queue_no, u32 arg)
+ int queue_no, u64 arg)
{
struct sk_buff *nskb;
int port_no;
header->type = queue_no;
header->length = skb->len;
header->port = port_no;
- header->reserved = 0;
header->arg = arg;
skb_queue_tail(queue, skb);
}
int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
- u32 arg)
+ u64 arg)
{
struct dp_stats_percpu *stats;
struct sk_buff_head *queue;
nla_for_each_attr(a, actions, actions_len, rem) {
static const u32 action_lens[ODPAT_MAX + 1] = {
[ODPAT_OUTPUT] = 4,
- [ODPAT_CONTROLLER] = 4,
+ [ODPAT_CONTROLLER] = 8,
[ODPAT_SET_DL_TCI] = 2,
[ODPAT_STRIP_VLAN] = 0,
[ODPAT_SET_DL_SRC] = ETH_ALEN,
[ODPAT_SET_NW_TOS] = 1,
[ODPAT_SET_TP_SRC] = 2,
[ODPAT_SET_TP_DST] = 2,
- [ODPAT_SET_TUNNEL] = 4,
+ [ODPAT_SET_TUNNEL] = 8,
[ODPAT_SET_PRIORITY] = 4,
[ODPAT_POP_PRIORITY] = 0,
[ODPAT_DROP_SPOOFED_ARP] = 0,
* @flow: The flow associated with this packet. May be %NULL if no flow.
* @ip_summed: Consistently stores L4 checksumming status across different
* kernel versions.
- * @tun_id: ID (in network byte order) of the tunnel that encapsulated this
- * packet. It is 0 if the packet was not received on a tunnel.
+ * @tun_id: ID of the tunnel that encapsulated this packet. It is 0 if the
+ * packet was not received on a tunnel.
*/
struct ovs_skb_cb {
struct vport *vport;
#ifdef NEED_CSUM_NORMALIZE
enum csum_type ip_summed;
#endif
- __be32 tun_id;
+ __be64 tun_id;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
void dp_process_received_packet(struct vport *, struct sk_buff *);
int dp_detach_port(struct vport *);
-int dp_output_control(struct datapath *, struct sk_buff *, int, u32 arg);
+int dp_output_control(struct datapath *, struct sk_buff *, int, u64 arg);
int dp_min_mtu(const struct datapath *dp);
void set_internal_devs_mtu(const struct datapath *dp);
}
struct port_lookup_key {
+ const struct tnl_mutable_config *mutable;
+ __be64 key;
u32 tunnel_type;
__be32 saddr;
__be32 daddr;
- __be32 key;
- const struct tnl_mutable_config *mutable;
};
/*
static u32 port_hash(struct port_lookup_key *k)
{
- return jhash_3words(k->key, k->saddr, k->daddr, k->tunnel_type);
+ /* The tunnel key is now 64 bits wide, so it no longer fits in a single
+  * jhash_3words() argument: hash the 32-bit fields first, then fold the
+  * key in as two 32-bit halves with the first hash as the seed. */
+ u32 x = jhash_3words(k->saddr, k->daddr, k->tunnel_type, 0);
+ return jhash_2words(k->key >> 32, k->key, x);
}
static u32 mutable_hash(const struct tnl_mutable_config *mutable)
return 0;
}
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
+struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
int tunnel_type,
const struct tnl_mutable_config **mutable)
{
#endif /* IPv6 */
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
unsigned int total_length = 0, header_length = 0, payload_length;
int tnl_send(struct vport *vport, struct sk_buff *skb);
void tnl_rcv(struct vport *vport, struct sk_buff *skb);
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
+struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
int tunnel_type,
const struct tnl_mutable_config **mutable);
bool tnl_frag_needed(struct vport *vport,
const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be32 flow_key);
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key);
void tnl_free_linked_skbs(struct sk_buff *skb);
static inline struct tnl_vport *tnl_vport_priv(const struct vport *vport)
return len;
}
+/* Returns the least-significant 32 bits of a __be64. */
+static __be32 be64_get_low32(__be64 x)
+{
+#ifdef __BIG_ENDIAN
+ /* Big-endian host: the __be64 holds its logical value directly, so
+  * truncating to 32 bits keeps the low-order bytes. */
+ return x;
+#else
+ /* Little-endian host: the network-order low 4 bytes occupy the
+  * high-order half of the host integer, so shift them down.
+  * NOTE(review): shifting a __be64 without a __force cast may draw a
+  * sparse bitwise-type warning -- confirm against the tree's style. */
+ return x >> 32;
+#endif
+}
+
static void gre_build_header(const struct vport *vport,
const struct tnl_mutable_config *mutable,
void *header)
greh->flags |= GRE_KEY;
if (mutable->port_config.out_key)
- *options = mutable->port_config.out_key;
+ *options = be64_get_low32(mutable->port_config.out_key);
}
static struct sk_buff *gre_update_header(const struct vport *vport,
/* Work backwards over the options so the checksum is last. */
if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION) {
- *options = OVS_CB(skb)->tun_id;
+ *options = be64_get_low32(OVS_CB(skb)->tun_id);
options--;
}
return skb;
}
-static int parse_header(struct iphdr *iph, __be16 *flags, __be32 *key)
+/* Zero-extends a __be32 into the least-significant 32 bits of a __be64. */
+static __be64 be32_extend_to_be64(__be32 x)
+{
+#ifdef __BIG_ENDIAN
+ /* Big-endian host: integer promotion already zero-extends into the
+  * low-order 32 bits of the 64-bit value. */
+ return x;
+#else
+ /* Little-endian host: place the 4 network-order bytes in the last 4
+  * bytes of memory, i.e. the high-order half of the host integer. */
+ return (__be64) x << 32;
+#endif
+}
+
+static int parse_header(struct iphdr *iph, __be16 *flags, __be64 *key)
{
/* IP and ICMP protocol handlers check that the IHL is valid. */
struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
if (greh->flags & GRE_KEY) {
hdr_len += GRE_HEADER_SECTION;
- *key = *options;
+ *key = be32_extend_to_be64(*options);
options++;
} else
*key = 0;
struct iphdr *iph;
__be16 flags;
- __be32 key;
+ __be64 key;
int tunnel_hdr_len, tot_hdr_len;
unsigned int orig_mac_header;
unsigned int orig_nw_header;
int hdr_len;
struct iphdr *iph;
__be16 flags;
- __be32 key;
+ __be64 key;
if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
goto error;
NXAST_POP_QUEUE, /* struct nx_action_pop_queue */
NXAST_REG_MOVE, /* struct nx_action_reg_move */
NXAST_REG_LOAD, /* struct nx_action_reg_load */
- NXAST_NOTE /* struct nx_action_note */
+ NXAST_NOTE, /* struct nx_action_note */
+ NXAST_SET_TUNNEL64, /* struct nx_action_set_tunnel64 */
};
/* Header for Nicira-defined actions. */
/* Action structure for NXAST_SET_TUNNEL.
*
- * Sets the encapsulating tunnel ID. */
+ * Sets the encapsulating tunnel ID to a 32-bit value. The most-significant 32
+ * bits of the tunnel ID are set to 0. */
struct nx_action_set_tunnel {
uint16_t type; /* OFPAT_VENDOR. */
uint16_t len; /* Length is 16. */
};
OFP_ASSERT(sizeof(struct nx_action_set_tunnel) == 16);
+/* Action structure for NXAST_SET_TUNNEL64.
+ *
+ * Sets the encapsulating tunnel ID to a 64-bit value. */
+struct nx_action_set_tunnel64 {
+ ovs_be16 type; /* OFPAT_VENDOR. */
+ ovs_be16 len; /* Length is 16. */
+ ovs_be32 vendor; /* NX_VENDOR_ID. */
+ ovs_be16 subtype; /* NXAST_SET_TUNNEL64. */
+ uint8_t pad[6];
+ ovs_be64 tun_id; /* Tunnel ID. */
+};
+OFP_ASSERT(sizeof(struct nx_action_set_tunnel64) == 24);
+
/* Action structure for NXAST_DROP_SPOOFED_ARP.
*
* Stops processing further actions, if the packet being processed is an
#ifndef __aligned_u64
#define __aligned_u64 __u64 __attribute__((aligned(8)))
+#define __aligned_be64 __be64 __attribute__((aligned(8)))
+#define __aligned_le64 __le64 __attribute__((aligned(8)))
#endif
#include <linux/if_link.h>
/**
* struct odp_msg - format of messages read from datapath fd.
- * @type: One of the %_ODPL_* constants.
* @length: Total length of message, including this header.
+ * @type: One of the %_ODPL_* constants.
* @port: Port that received the packet embedded in this message.
- * @reserved: Not currently used. Should be set to 0.
* @arg: Argument value whose meaning depends on @type.
*
* For @type == %_ODPL_MISS_NR, the header is followed by packet data. The
* data.
*/
struct odp_msg {
- uint32_t type;
uint32_t length;
+ uint16_t type;
uint16_t port;
- uint16_t reserved;
- uint32_t arg;
+ __aligned_u64 arg;
};
/**
#define ODP_TCI_PRESENT 0x1000 /* CFI bit */
struct odp_flow_key {
- ovs_be32 tun_id; /* Encapsulating tunnel ID. */
+ ovs_be64 tun_id; /* Encapsulating tunnel ID. */
ovs_be32 nw_src; /* IP source address. */
ovs_be32 nw_dst; /* IP destination address. */
uint16_t in_port; /* Input switch port. */
/* This goes in the "config" member of struct odp_port for tunnel vports. */
struct tnl_port_config {
- __u32 flags;
- __be32 saddr;
- __be32 daddr;
- __be32 in_key;
- __be32 out_key;
- __u8 tos;
- __u8 ttl;
+ __aligned_be64 in_key;
+ __aligned_be64 out_key;
+ __u32 flags;
+ __be32 saddr;
+ __be32 daddr;
+ __u8 tos;
+ __u8 ttl;
};
#endif /* openvswitch/tunnel.h */
}
void
-cls_rule_set_tun_id(struct cls_rule *rule, ovs_be32 tun_id)
+cls_rule_set_tun_id(struct cls_rule *rule, ovs_be64 tun_id)
{
rule->wc.wildcards &= ~FWW_TUN_ID;
rule->flow.tun_id = tun_id;
}
}
if (!(w & FWW_TUN_ID)) {
- ds_put_format(s, "tun_id=0x%"PRIx32",", ntohl(f->tun_id));
+ ds_put_format(s, "tun_id=0x%"PRIx64",", ntohll(f->tun_id));
}
if (!(w & FWW_IN_PORT)) {
ds_put_format(s, "in_port=%"PRIu16",",
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 36 + FLOW_N_REGS * 4);
+ BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 40 + FLOW_N_REGS * 4);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 36 + 4 * FLOW_N_REGS);
+ BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 40 + 4 * FLOW_N_REGS);
for (i = 0; i < FLOW_N_REGS; i++) {
flow->regs[i] &= wildcards->reg_masks[i];
void cls_rule_set_reg(struct cls_rule *, unsigned int reg_idx, uint32_t value);
void cls_rule_set_reg_masked(struct cls_rule *, unsigned int reg_idx,
uint32_t value, uint32_t mask);
-void cls_rule_set_tun_id(struct cls_rule *, ovs_be32 tun_id);
+void cls_rule_set_tun_id(struct cls_rule *, ovs_be64 tun_id);
void cls_rule_set_in_port(struct cls_rule *, uint16_t odp_port);
void cls_rule_set_dl_type(struct cls_rule *, ovs_be16);
void cls_rule_set_dl_src(struct cls_rule *, const uint8_t[6]);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
bool create, struct dpif **);
static int dp_netdev_output_control(struct dp_netdev *, const struct ofpbuf *,
- int queue_no, int port_no, uint32_t arg);
+ int queue_no, int port_no, uint64_t arg);
static int dp_netdev_execute_actions(struct dp_netdev *,
struct ofpbuf *, struct flow *,
const struct nlattr *actions,
static int
dp_netdev_output_control(struct dp_netdev *dp, const struct ofpbuf *packet,
- int queue_no, int port_no, uint32_t arg)
+ int queue_no, int port_no, uint64_t arg)
{
struct odp_msg *header;
struct ofpbuf *msg;
case ODPAT_CONTROLLER:
dp_netdev_output_control(dp, packet, _ODPL_ACTION_NR,
- key->in_port, nl_attr_get_u32(a));
+ key->in_port, nl_attr_get_u64(a));
break;
case ODPAT_SET_DL_TCI:
/* Minimum number of bytes of headroom for a packet returned by dpif_recv()
* member function. This headroom allows "struct odp_msg" to be replaced by
* "struct ofp_packet_in" without copying the buffer. */
-#define DPIF_RECV_MSG_PADDING (sizeof(struct ofp_packet_in) \
- - sizeof(struct odp_msg))
+#define DPIF_RECV_MSG_PADDING \
+ ROUND_UP(sizeof(struct ofp_packet_in) - sizeof(struct odp_msg), 8)
BUILD_ASSERT_DECL(sizeof(struct ofp_packet_in) > sizeof(struct odp_msg));
-BUILD_ASSERT_DECL(DPIF_RECV_MSG_PADDING % 4 == 0);
+BUILD_ASSERT_DECL(DPIF_RECV_MSG_PADDING % 8 == 0);
int dpif_recv_get_mask(const struct dpif *, int *listen_mask);
int dpif_recv_set_mask(struct dpif *, int listen_mask);
* present and has a correct length, and otherwise NULL.
*/
int
-flow_extract(struct ofpbuf *packet, ovs_be32 tun_id, uint16_t in_port,
+flow_extract(struct ofpbuf *packet, ovs_be64 tun_id, uint16_t in_port,
struct flow *flow)
{
struct ofpbuf b = *packet;
void
flow_format(struct ds *ds, const struct flow *flow)
{
- ds_put_format(ds, "tunnel%#"PRIx32":in_port%04"PRIx16":tci(",
- ntohl(flow->tun_id), flow->in_port);
+ /* tun_id is an ovs_be64 (network byte order): convert before printing,
+  * matching format_odp_flow_key() and the wildcard formatter, both of
+  * which print ntohll() of the tunnel ID. */
+ ds_put_format(ds, "tunnel%#"PRIx64":in_port%04"PRIx16":tci(",
+ ntohll(flow->tun_id), flow->in_port);
if (flow->vlan_tci) {
ds_put_format(ds, "vlan%"PRIu16",pcp%d",
vlan_tci_to_vid(flow->vlan_tci),
BUILD_ASSERT_DECL(FLOW_N_REGS <= NXM_NX_MAX_REGS);
struct flow {
+ ovs_be64 tun_id; /* Encapsulating tunnel ID. */
uint32_t regs[FLOW_N_REGS]; /* Registers. */
- ovs_be32 tun_id; /* Encapsulating tunnel ID. */
ovs_be32 nw_src; /* IP source address. */
ovs_be32 nw_dst; /* IP destination address. */
uint16_t in_port; /* Input switch port. */
/* Assert that there are FLOW_SIG_SIZE bytes of significant data in "struct
* flow", followed by FLOW_PAD_SIZE bytes of padding. */
-#define FLOW_SIG_SIZE (36 + FLOW_N_REGS * 4)
+#define FLOW_SIG_SIZE (40 + FLOW_N_REGS * 4)
#define FLOW_PAD_SIZE 0
BUILD_ASSERT_DECL(offsetof(struct flow, nw_tos) == FLOW_SIG_SIZE - 1);
BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->nw_tos) == 1);
BUILD_ASSERT_DECL(sizeof(struct flow) == FLOW_SIG_SIZE + FLOW_PAD_SIZE);
-int flow_extract(struct ofpbuf *, ovs_be32 tun_id, uint16_t in_port,
+/* Prototype must match the definition, which takes an ovs_be64 tun_id
+ * (network byte order), not a plain uint64_t; keeping the bitwise type
+ * also preserves sparse endianness checking for callers. */
+int flow_extract(struct ofpbuf *, ovs_be64 tun_id, uint16_t in_port,
struct flow *);
void flow_extract_stats(const struct flow *flow, struct ofpbuf *packet,
struct odp_flow_stats *stats);
#include <net/if.h>
#include <sys/ioctl.h>
+#include "byte-order.h"
#include "list.h"
#include "netdev-provider.h"
#include "openvswitch/datapath-protocol.h"
config.flags |= TNL_F_IN_KEY_MATCH;
config.flags |= TNL_F_OUT_KEY_ACTION;
} else {
- config.out_key = config.in_key = htonl(atoi(node->data));
+ uint64_t key = strtoull(node->data, NULL, 0);
+ config.out_key = config.in_key = htonll(key);
}
} else if (!strcmp(node->name, "in_key") && is_gre) {
if (!strcmp(node->data, "flow")) {
config.flags |= TNL_F_IN_KEY_MATCH;
} else {
- config.in_key = htonl(atoi(node->data));
+ config.in_key = htonll(strtoull(node->data, NULL, 0));
}
} else if (!strcmp(node->name, "out_key") && is_gre) {
if (!strcmp(node->data, "flow")) {
config.flags |= TNL_F_OUT_KEY_ACTION;
} else {
- config.out_key = htonl(atoi(node->data));
+ config.out_key = htonll(strtoull(node->data, NULL, 0));
}
} else if (!strcmp(node->name, "tos")) {
if (!strcmp(node->data, "inherit")) {
/* Tunnel ID. */
case NFI_NXM_NX_TUN_ID:
- flow->tun_id = htonl(ntohll(get_unaligned_be64(value)));
+ flow->tun_id = get_unaligned_be64(value);
return 0;
/* Registers. */
/* Tunnel ID. */
if (!(wc & FWW_TUN_ID)) {
- nxm_put_64(b, NXM_NX_TUN_ID, htonll(ntohl(flow->tun_id)));
+ nxm_put_64(b, NXM_NX_TUN_ID, flow->tun_id);
}
/* Registers. */
return ntohs(flow->tp_dst) & 0xff;
case NFI_NXM_NX_TUN_ID:
- return ntohl(flow->tun_id);
+ return ntohll(flow->tun_id);
#define NXM_READ_REGISTER(IDX) \
case NFI_NXM_NX_REG##IDX: \
} else if (dst->header == NXM_OF_VLAN_TCI) {
flow->vlan_tci = htons(new_data);
} else if (dst->header == NXM_NX_TUN_ID) {
- flow->tun_id = htonl(new_data);
+ flow->tun_id = htonll(new_data);
} else {
NOT_REACHED();
}
void
format_odp_flow_key(struct ds *ds, const struct odp_flow_key *key)
{
- ds_put_format(ds, "tun_id%#"PRIx32" in_port%d tci(",
- ntohl(key->tun_id), key->in_port);
+ ds_put_format(ds, "tun_id%#"PRIx64" in_port%d tci(",
+ ntohll(key->tun_id), key->in_port);
if (key->dl_tci) {
ds_put_format(ds, "vlan%"PRIu16",pcp%d",
vlan_tci_to_vid(key->dl_tci),
switch ((enum odp_action_type) type) {
case ODPAT_OUTPUT: return 4;
- case ODPAT_CONTROLLER: return 4;
+ case ODPAT_CONTROLLER: return 8;
case ODPAT_SET_DL_TCI: return 2;
case ODPAT_STRIP_VLAN: return 0;
case ODPAT_SET_DL_SRC: return ETH_ADDR_LEN;
case ODPAT_SET_NW_TOS: return 1;
case ODPAT_SET_TP_SRC: return 2;
case ODPAT_SET_TP_DST: return 2;
- case ODPAT_SET_TUNNEL: return 4;
+ case ODPAT_SET_TUNNEL: return 8;
case ODPAT_SET_PRIORITY: return 4;
case ODPAT_POP_PRIORITY: return 0;
case ODPAT_DROP_SPOOFED_ARP: return 0;
ds_put_format(ds, "%"PRIu16, nl_attr_get_u32(a));
break;
case ODPAT_CONTROLLER:
- ds_put_format(ds, "ctl(%"PRIu32")", nl_attr_get_u32(a));
+ ds_put_format(ds, "ctl(%"PRIu64")", nl_attr_get_u64(a));
break;
case ODPAT_SET_TUNNEL:
- ds_put_format(ds, "set_tunnel(%#"PRIx32")",
- ntohl(nl_attr_get_be32(a)));
+ ds_put_format(ds, "set_tunnel(%#"PRIx64")",
+ ntohll(nl_attr_get_be64(a)));
break;
case ODPAT_SET_DL_TCI:
ds_put_format(ds, "set_tci(vid=%"PRIu16",pcp=%d)",
nar->vendor = htonl(NX_VENDOR_ID);
nar->subtype = htons(NXAST_RESUBMIT);
nar->in_port = htons(str_to_u32(arg));
- } else if (!strcasecmp(act, "set_tunnel")) {
- struct nx_action_set_tunnel *nast;
- nast = put_action(b, sizeof *nast, OFPAT_VENDOR);
- nast->vendor = htonl(NX_VENDOR_ID);
- nast->subtype = htons(NXAST_SET_TUNNEL);
- nast->tun_id = htonl(str_to_u32(arg));
+ } else if (!strcasecmp(act, "set_tunnel")
+ || !strcasecmp(act, "set_tunnel64")) {
+ uint64_t tun_id = str_to_u64(arg);
+ if (!strcasecmp(act, "set_tunnel64") || tun_id > UINT32_MAX) {
+ struct nx_action_set_tunnel64 *nast64;
+ nast64 = put_action(b, sizeof *nast64, OFPAT_VENDOR);
+ nast64->vendor = htonl(NX_VENDOR_ID);
+ nast64->subtype = htons(NXAST_SET_TUNNEL64);
+ nast64->tun_id = htonll(tun_id);
+ } else {
+ struct nx_action_set_tunnel *nast;
+ nast = put_action(b, sizeof *nast, OFPAT_VENDOR);
+ nast->vendor = htonl(NX_VENDOR_ID);
+ nast->subtype = htons(NXAST_SET_TUNNEL);
+ nast->tun_id = htonl(tun_id);
+ }
} else if (!strcasecmp(act, "drop_spoofed_arp")) {
struct nx_action_header *nah;
nah = put_action(b, sizeof *nah, OFPAT_VENDOR);
switch (index) {
case F_TUN_ID:
- cls_rule_set_tun_id(rule, htonl(str_to_u32(value)));
+ cls_rule_set_tun_id(rule, htonll(str_to_u64(value)));
break;
case F_IN_PORT:
case NXAST_REG_MOVE: return sizeof(struct nx_action_reg_move);
case NXAST_REG_LOAD: return sizeof(struct nx_action_reg_load);
case NXAST_NOTE: return -1;
+ case NXAST_SET_TUNNEL64: return sizeof(struct nx_action_set_tunnel64);
default: return -1;
}
}
}
if (subtype <= TYPE_MAXIMUM(enum nx_action_subtype)) {
+ const struct nx_action_set_tunnel64 *nast64;
const struct nx_action_set_tunnel *nast;
const struct nx_action_set_queue *nasq;
const struct nx_action_resubmit *nar;
nxm_format_reg_load(load, string);
return;
+ case NXAST_SET_TUNNEL64:
+ nast64 = (struct nx_action_set_tunnel64 *) nah;
+ ds_put_format(string, "set_tunnel64:%#"PRIx64,
+ ntohll(nast64->tun_id));
+ return;
+
case NXAST_SNAT__OBSOLETE:
default:
break;
wc->nw_dst_mask = ofputil_wcbits_to_netmask(ofpfw >> OFPFW_NW_DST_SHIFT);
if (flow_format == NXFF_TUN_ID_FROM_COOKIE && !(ofpfw & NXFW_TUN_ID)) {
- rule->flow.tun_id = htonl(ntohll(cookie) >> 32);
+ rule->flow.tun_id = htonll(ntohll(cookie) >> 32);
} else {
wc->wildcards |= FWW_TUN_ID;
- rule->flow.tun_id = htonl(0);
+ rule->flow.tun_id = htonll(0);
}
if (ofpfw & OFPFW_DL_DST) {
ofpfw |= NXFW_TUN_ID;
} else {
uint32_t cookie_lo = ntohll(cookie_in);
- uint32_t cookie_hi = ntohl(rule->flow.tun_id);
+ uint32_t cookie_hi = ntohll(rule->flow.tun_id);
cookie_in = htonll(cookie_lo | ((uint64_t) cookie_hi << 32));
}
}
|| !regs_fully_wildcarded(wc)
|| (!(wc->wildcards & FWW_TUN_ID)
&& (!cookie_support
- || (cookie_hi && cookie_hi != rule->flow.tun_id)))) {
+ || (cookie_hi && cookie_hi != ntohll(rule->flow.tun_id))))) {
return NXFF_NXM;
} else if (!(wc->wildcards & FWW_TUN_ID)) {
return NXFF_TUN_ID_FROM_COOKIE;
case NXAST_NOTE:
return 0;
+ case NXAST_SET_TUNNEL64:
+ return check_action_exact_len(a, len,
+ sizeof(struct nx_action_set_tunnel64));
+
case NXAST_SNAT__OBSOLETE:
default:
return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_BAD_VENDOR_TYPE);
const struct nlattr *odp_actions, unsigned int actions_len,
struct ofpbuf *packet)
{
- if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint32_t))
+ if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t))
&& odp_actions->nla_type == ODPAT_CONTROLLER) {
/* As an optimization, avoid a round-trip from userspace to kernel to
* userspace. This also avoids possibly filling up kernel packet
msg->type = _ODPL_ACTION_NR;
msg->length = sizeof(struct odp_msg) + packet->size;
msg->port = in_port;
- msg->reserved = 0;
- msg->arg = nl_attr_get_u32(odp_actions);
+ msg->arg = nl_attr_get_u64(odp_actions);
send_packet_in(ofproto, packet);
&ctx->nf_output_iface, ctx->odp_actions);
break;
case OFPP_CONTROLLER:
- nl_msg_put_u32(ctx->odp_actions, ODPAT_CONTROLLER, max_len);
+ nl_msg_put_u64(ctx->odp_actions, ODPAT_CONTROLLER, max_len);
break;
case OFPP_LOCAL:
add_output_action(ctx, ODPP_LOCAL);
const struct nx_action_set_tunnel *nast;
const struct nx_action_set_queue *nasq;
enum nx_action_subtype subtype = ntohs(nah->subtype);
+ ovs_be64 tun_id;
assert(nah->vendor == htonl(NX_VENDOR_ID));
switch (subtype) {
case NXAST_SET_TUNNEL:
nast = (const struct nx_action_set_tunnel *) nah;
- nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_TUNNEL, nast->tun_id);
- ctx->flow.tun_id = nast->tun_id;
+ tun_id = htonll(ntohl(nast->tun_id));
+ nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id);
+ ctx->flow.tun_id = tun_id;
break;
case NXAST_DROP_SPOOFED_ARP:
/* Nothing to do. */
break;
+ case NXAST_SET_TUNNEL64:
+ tun_id = ((const struct nx_action_set_tunnel64 *) nah)->tun_id;
+ nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id);
+ ctx->flow.tun_id = tun_id;
+ break;
+
/* If you add a new action here that modifies flow data, don't forget to
* update the flow key in ctx->flow at the same time. */
cookie=0x123456789abcdef hard_timeout=10 priority=60000 actions=controller
actions=note:41.42.43,note:00.01.02.03.04.05.06.07,note
tun_id=0x1234,cookie=0x5678,actions=flood
+actions=set_tunnel:0x1234,set_tunnel64:0x9876,set_tunnel:0x123456789
actions=drop
])
AT_CHECK([ovs-ofctl parse-flows flows.txt], [0], [stdout], [stderr])
OFPT_FLOW_MOD: ADD actions=note:41.42.43.00.00.00,note:00.01.02.03.04.05.06.07.00.00.00.00.00.00,note:00.00.00.00.00.00
NXT_TUN_ID_FROM_COOKIE: set=1
OFPT_FLOW_MOD: ADD cookie:0x123400005678 actions=FLOOD
+OFPT_FLOW_MOD: ADD actions=set_tunnel:0x1234,set_tunnel64:0x9876,set_tunnel64:0x123456789
OFPT_FLOW_MOD: ADD actions=drop
])
AT_CHECK([sed 's/.*|//' stderr], [0], [dnl
CONSTANT_HTONL(0xc0a04455) };
static ovs_be32 nw_dst_values[] = { CONSTANT_HTONL(0xc0a80002),
CONSTANT_HTONL(0xc0a04455) };
-static ovs_be32 tun_id_values[] = { 0, 0xffff0000 };
+static ovs_be64 tun_id_values[] = {
+ 0,
+ CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) };
static uint16_t in_port_values[] = { 1, ODPP_LOCAL };
static ovs_be16 vlan_tci_values[] = { CONSTANT_HTONS(101), CONSTANT_HTONS(0) };
static ovs_be16 dl_type_values[]
\fBresubmit\fR actions are ignored.
.
.IP \fBset_tunnel\fB:\fIid\fR
-If outputting to a port that encapsulates the packet in a tunnel and supports
-an identifier (such as GRE), sets the identifier to \fBid\fR.
+.IQ \fBset_tunnel64\fB:\fIid\fR
+If outputting to a port that encapsulates the packet in a tunnel and
+supports an identifier (such as GRE), sets the identifier to \fBid\fR.
+If the \fBset_tunnel\fR form is used and \fIid\fR fits in 32 bits,
+then this uses an action extension that is supported by Open vSwitch
+1.0 and later. Otherwise, if \fIid\fR is a 64-bit value, it requires
+Open vSwitch 1.1 or later.
.
.IP \fBdrop_spoofed_arp\fR
Stops processing further actions, if the packet being processed is an