if (unlikely(err)) {
kfree_skb(skb);
return ERR_PTR(err);
- }
+ }
/* GSO is not implemented for packets with an 802.1Q header, so
* we have to do segmentation before we add that header.
* (limited to a page for sanity)
* offset -- number of records to skip
*/
-static int brc_get_fdb_entries(struct net_device *dev, void __user *userbuf,
+static int brc_get_fdb_entries(struct net_device *dev, void __user *userbuf,
unsigned long maxnum, unsigned long offset)
{
struct nlattr *attrs[BRC_GENL_A_MAX + 1];
void *data;
ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!ans_skb)
+ if (!ans_skb)
return -ENOMEM;
data = genlmsg_put_reply(ans_skb, info, &brc_genl_family,
goto error;
err = genl_register_ops(&brc_genl_family, &brc_genl_ops_query_dp);
- if (err != 0)
+ if (err != 0)
goto err_unregister;
err = genl_register_ops(&brc_genl_family, &brc_genl_ops_dp_result);
- if (err != 0)
+ if (err != 0)
goto err_unregister;
err = genl_register_ops(&brc_genl_family, &brc_genl_ops_set_proc);
- if (err != 0)
+ if (err != 0)
goto err_unregister;
strcpy(brc_mc_group.name, "brcompat");
skb->ip_summed);
/* None seems the safest... */
OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
- }
+ }
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* Xen has a special way of representing CHECKSUM_PARTIAL on older
}
success:
copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
-
+
retval = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (copy_bytes == skb->len) {
#include "vport-internal_dev.h"
#include "vport-netdev.h"
-static int dp_device_event(struct notifier_block *unused, unsigned long event,
- void *ptr)
+static int dp_device_event(struct notifier_block *unused, unsigned long event,
+ void *ptr)
{
struct net_device *dev = ptr;
struct vport *vport;
/* xxx We use a default value of 0 for all fields. If the caller is
* xxx attempting to set the value to our default, just silently
- * xxx ignore the request.
+ * xxx ignore the request.
*/
if (val != 0) {
struct datapath *dp;
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
- printk("%s: xxx writing dp parms not supported yet!\n",
+ printk("%s: xxx writing dp parms not supported yet!\n",
dp_name(dp));
else
result = -ENODEV;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- printk("%s: xxx writing port parms not supported yet!\n",
+ printk("%s: xxx writing port parms not supported yet!\n",
dp_name(p->dp));
return ret;
/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
- struct sw_flow_actions *sf_acts = container_of(rcu,
+ struct sw_flow_actions *sf_acts = container_of(rcu,
struct sw_flow_actions, rcu);
kfree(sf_acts);
}
key->nw_proto = ntohs(arp->ar_op);
}
- if (key->nw_proto == ARPOP_REQUEST
+ if (key->nw_proto == ARPOP_REQUEST
|| key->nw_proto == ARPOP_REPLY) {
memcpy(&key->nw_src, arp->ar_sip, sizeof(key->nw_src));
memcpy(&key->nw_dst, arp->ar_tip, sizeof(key->nw_dst));
};
/* No CONFIG_DMI before 2.6.16 */
-#if defined(CONFIG_DMI) || defined(CONFIG_X86_32)
+#if defined(CONFIG_DMI) || defined(CONFIG_X86_32)
extern int dmi_check_system(struct dmi_system_id *list);
extern char * dmi_get_system_info(int field);
#endif /* linux kernel < 2.6.22 */
-#endif
+#endif
{
up(&lock->sema);
}
-#else
+#else
#include_next <linux/mutex.h>
#endif /* linux version < 2.6.16 */
-#endif
+#endif
#endif /* linux version < 2.6.22 */
-#endif
+#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-/* Emulate Linux 2.6.17 and later behavior, in which kfree_skb silently ignores
+/* Emulate Linux 2.6.17 and later behavior, in which kfree_skb silently ignores
* null pointer arguments. */
#define kfree_skb(skb) kfree_skb_maybe_null(skb)
static inline void kfree_skb_maybe_null(struct sk_buff *skb)
#include <linux/genetlink.h>
/*----------------------------------------------------------------------------
- * In 2.6.23, registering of multicast groups was added. Our compatability
+ * In 2.6.23, registering of multicast groups was added. Our compatibility
* layer just supports registering a single group, since that's all we
* need.
*/
#define genlmsg_multicast(s, p, g, f) \
genlmsg_multicast_flags((s), (p), (g), (f))
-static inline int genlmsg_multicast_flags(struct sk_buff *skb, u32 pid,
+static inline int genlmsg_multicast_flags(struct sk_buff *skb, u32 pid,
unsigned int group, gfp_t flags)
{
int err;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
-/* "set_normalized_timespec" is defined but not exported in kernels
+/* "set_normalized_timespec" is defined but not exported in kernels
* before 2.6.26. */
/**
* 0 <= tv_nsec < NSEC_PER_SEC
* For negative values only the tv_sec field is negative !
*/
-void set_normalized_timespec(struct timespec *ts,
+void set_normalized_timespec(struct timespec *ts,
time_t sec, long nsec)
{
while (nsec >= NSEC_PER_SEC) {
err = sock_create(AF_INET, SOCK_DGRAM, 0, &capwap_rcv_socket);
if (err)
goto error;
-
+
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = INADDR_ANY;
sin.sin_port = htons(CAPWAP_DST_PORT);
uint8_t dl_src[6]; /* Ethernet source address. */
uint8_t dl_dst[6]; /* Ethernet destination address. */
uint8_t nw_proto; /* IP protocol or lower 8 bits of
- ARP opcode. */
+ ARP opcode. */
uint8_t dl_vlan_pcp; /* Input VLAN priority. */
uint8_t nw_tos; /* IP ToS (DSCP field, 6 bits). */
uint8_t reserved[3]; /* Align to 32-bits...must be zeroed. */
return NULL;
}
-/* Checks if the flow defined by 'target' with 'wildcards' at 'priority'
- * overlaps with any other rule at the same priority in the classifier.
+/* Checks if the flow defined by 'target' with 'wildcards' at 'priority'
+ * overlaps with any other rule at the same priority in the classifier.
* Two rules are considered overlapping if a packet could match both. */
bool
classifier_rule_overlaps(const struct classifier *cls,
LIST_FOR_EACH (rule, struct cls_rule, node.list,
&bucket->rules) {
- if (rule->priority == priority
+ if (rule->priority == priority
&& rules_match_2wild(rule, &target_rule, 0)) {
return true;
}
rules_match_2wild(const struct cls_rule *wild1, const struct cls_rule *wild2,
int field_idx)
{
- return rules_match(wild1, wild2,
- wild1->wc.wildcards | wild2->wc.wildcards,
+ return rules_match(wild1, wild2,
+ wild1->wc.wildcards | wild2->wc.wildcards,
wild1->wc.nw_src_mask & wild2->wc.nw_src_mask,
- wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask,
+ wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask,
field_idx);
}
const flow_t *);
struct cls_rule *classifier_lookup_exact(const struct classifier *,
const flow_t *);
-bool classifier_rule_overlaps(const struct classifier *, const flow_t *,
+bool classifier_rule_overlaps(const struct classifier *, const flow_t *,
uint32_t wildcards, unsigned int priority);
typedef void cls_cb_func(struct cls_rule *, void *aux);
{
char short_options[UCHAR_MAX * 3 + 1];
char *p = short_options;
-
+
for (; options->name; options++) {
const struct option *o = options;
if (o->flag == NULL && o->val > 0 && o->val <= UCHAR_MAX) {
}
}
*p = '\0';
-
+
return xstrdup(short_options);
}
}
/* Logs the coverage counters at the given vlog 'level'. If
- * 'suppress_dups' is true, then duplicate events are not displayed.
+ * 'suppress_dups' is true, then duplicate events are not displayed.
* Care should be taken in the value used for 'level'. Depending on the
* configuration, syslog can write changes synchronously, which can
* cause the coverage messages to take several seconds to write. */
/* Returns the file name that would be used for a pidfile if 'name' were
* provided to set_pidfile(). The caller must free the returned string. */
char *
-make_pidfile_name(const char *name)
+make_pidfile_name(const char *name)
{
return (!name
? xasprintf("%s/%s.pid", ovs_rundir, program_name)
bool am_bound;
if (cli->state != state) {
- VLOG_DBG("%s: entering %s", cli_name, state_name(state));
+ VLOG_DBG("%s: entering %s", cli_name, state_name(state));
cli->state = state;
}
cli->state_entered = time_now();
if (!error) {
if (VLOG_IS_DBG_ENABLED()) {
VLOG_DBG_RL(&rl, "%s: received %s", cli_name,
- dhcp_msg_to_string(msg, false, &cli->s));
+ dhcp_msg_to_string(msg, false, &cli->s));
} else {
VLOG_INFO_RL(&rl, "%s: received %s",
cli_name, dhcp_type_name(msg->type));
if (b.size <= ETH_TOTAL_MAX) {
if (VLOG_IS_DBG_ENABLED()) {
VLOG_DBG("%s: sending %s",
- cli_name, dhcp_msg_to_string(msg, false, &cli->s));
+ cli_name, dhcp_msg_to_string(msg, false, &cli->s));
} else {
VLOG_INFO("%s: sending %s", cli_name, dhcp_type_name(msg->type));
}
/* Modify the TCI field of 'packet'. If a VLAN tag is not present, one
- * is added with the TCI field set to 'tci'. If a VLAN tag is present,
+ * is added with the TCI field set to 'tci'. If a VLAN tag is present,
* then 'mask' bits are cleared before 'tci' is logically OR'd into the
* TCI field.
*
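/* Illustrative sketch: the tag-already-present case described above, shown on
 * a minimal hypothetical 802.1Q header (TPID then TCI, both network byte
 * order).  The real helper works on an ofpbuf-carried packet; this only
 * sketches the mask-then-OR update, not the actual implementation. */
#include <stdint.h>
#include <arpa/inet.h>

struct vlan_hdr_sketch {
    uint16_t tpid;                  /* 0x8100, network byte order */
    uint16_t tci;                   /* priority, CFI, VLAN ID */
};

static void
update_tci_sketch(struct vlan_hdr_sketch *vh, uint16_t tci, uint16_t mask)
{
    uint16_t cur = ntohs(vh->tci);

    /* Clear 'mask' bits, then OR in 'tci', as the comment above describes. */
    vh->tci = htons((cur & ~mask) | tci);
}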
if (dpif) {
struct registered_dpif_class *registered_class;
- registered_class = shash_find_data(&dpif_classes,
+ registered_class = shash_find_data(&dpif_classes,
dpif->dpif_class->type);
assert(registered_class);
assert(registered_class->refcount);
}
void
-ds_clear(struct ds *ds)
+ds_clear(struct ds *ds)
{
ds->length = 0;
}
}
void
-ds_put_printable(struct ds *ds, const char *s, size_t n)
+ds_put_printable(struct ds *ds, const char *s, size_t n)
{
ds_reserve(ds, ds->length + n);
while (n-- > 0) {
ds->length += used;
return;
}
- ds_reserve(ds, ds->length + (avail < 32 ? 64 : 2 * avail));
+ ds_reserve(ds, ds->length + (avail < 32 ? 64 : 2 * avail));
}
}
static void
unlink_files(void *aux OVS_UNUSED)
{
- do_unlink_files();
+ do_unlink_files();
}
static void
}
static struct tcp_header *
-pull_tcp(struct ofpbuf *packet)
+pull_tcp(struct ofpbuf *packet)
{
if (packet->size >= TCP_HEADER_LEN) {
struct tcp_header *tcp = packet->data;
}
static struct udp_header *
-pull_udp(struct ofpbuf *packet)
+pull_udp(struct ofpbuf *packet)
{
return ofpbuf_try_pull(packet, UDP_HEADER_LEN);
}
static struct icmp_header *
-pull_icmp(struct ofpbuf *packet)
+pull_icmp(struct ofpbuf *packet)
{
return ofpbuf_try_pull(packet, ICMP_HEADER_LEN);
}
} else if (flow->dl_type == htons(ETH_TYPE_ARP)) {
const struct arp_eth_header *arp = pull_arp(&b);
if (arp && arp->ar_hrd == htons(1)
- && arp->ar_pro == htons(ETH_TYPE_IP)
+ && arp->ar_pro == htons(ETH_TYPE_IP)
&& arp->ar_hln == ETH_ADDR_LEN
&& arp->ar_pln == 4) {
/* We only match on the lower 8 bits of the opcode. */
flow->nw_proto = ntohs(arp->ar_op);
}
- if ((flow->nw_proto == ARP_OP_REQUEST)
+ if ((flow->nw_proto == ARP_OP_REQUEST)
|| (flow->nw_proto == ARP_OP_REPLY)) {
flow->nw_src = arp->ar_spa;
flow->nw_dst = arp->ar_tpa;
* arguments must have been initialized through a call to flow_extract().
*/
void
-flow_extract_stats(const flow_t *flow, struct ofpbuf *packet,
+flow_extract_stats(const flow_t *flow, struct ofpbuf *packet,
struct odp_flow_stats *stats)
{
memset(stats, '\0', sizeof(*stats));
}
void
-flow_print(FILE *stream, const flow_t *flow)
+flow_print(FILE *stream, const flow_t *flow)
{
char *s = flow_to_string(flow);
fputs(s, stream);
typedef struct odp_flow_key flow_t;
int flow_extract(struct ofpbuf *, uint32_t tun_id, uint16_t in_port, flow_t *);
-void flow_extract_stats(const flow_t *flow, struct ofpbuf *packet,
+void flow_extract_stats(const flow_t *flow, struct ofpbuf *packet,
struct odp_flow_stats *stats);
void flow_to_match(const flow_t *, uint32_t wildcards, bool tun_id_cookie,
struct ofp_match *);
static void queue_tx(struct lswitch *, struct rconn *, struct ofpbuf *);
static void send_features_request(struct lswitch *, struct rconn *);
-static void send_default_flows(struct lswitch *sw, struct rconn *rconn,
+static void send_default_flows(struct lswitch *sw, struct rconn *rconn,
FILE *default_flows);
typedef void packet_handler_func(struct lswitch *, struct rconn *, void *);
}
static void
-send_default_flows(struct lswitch *sw, struct rconn *rconn,
+send_default_flows(struct lswitch *sw, struct rconn *rconn,
FILE *default_flows)
{
char line[1024];
uint16_t priority, idle_timeout, hard_timeout;
uint64_t cookie;
struct ofp_match match;
-
+
char *comment;
-
+
/* Delete comments. */
comment = strchr(line, '#');
- if (comment) {
+ if (comment) {
*comment = '\0';
}
-
+
/* Drop empty lines. */
if (line[strspn(line, " \t\n")] == '\0') {
continue;
- }
-
+ }
+
/* Parse and send. str_to_flow() will expand and reallocate the data
* in 'buffer', so we can't keep pointers into it across the str_to_flow()
* call. */
struct rconn;
struct lswitch *lswitch_create(struct rconn *, bool learn_macs,
- bool exact_flows, int max_idle,
+ bool exact_flows, int max_idle,
bool action_normal, FILE *default_flows);
void lswitch_set_queue(struct lswitch *sw, uint32_t queue);
void lswitch_run(struct lswitch *);
if (!(netdev_dev->cache_valid & VALID_IS_PSEUDO)) {
const char *name = netdev_dev_get_name(&netdev_dev->netdev_dev);
const char *type = netdev_dev_get_type(&netdev_dev->netdev_dev);
-
+
netdev_dev->is_tap = !strcmp(type, "tap");
netdev_dev->is_internal = false;
if (!netdev_dev->is_tap) {
iface, &dest, &gateway, &flags, &refcnt,
&use, &metric, &mask, &mtu, &window, &irtt) != 11) {
- VLOG_WARN_RL(&rl, "%s: could not parse line %d: %s",
+ VLOG_WARN_RL(&rl, "%s: could not parse line %d: %s",
fn, ln, line);
continue;
}
}
/* The output of 'dest', 'mask', and 'gateway' were given in
- * network byte order, so we don't need need any endian
+ * network byte order, so we don't need any endian
* conversions here. */
if ((dest & mask) == (host->s_addr & mask)) {
if (!gateway) {
* implementations. */
struct netdev_dev {
char *name; /* Name of network device. */
- const struct netdev_class *netdev_class; /* Functions to control
+ const struct netdev_class *netdev_class; /* Functions to control
this device. */
int ref_cnt; /* Times this device was opened. */
struct shash_node *node; /* Pointer to element in global map. */
struct netdev_vport_notifier {
struct netdev_notifier notifier;
struct list list_node;
- struct shash_node *shash_node;
+ struct shash_node *shash_node;
};
static struct shash netdev_vport_notifiers =
new_args = shash_sort(args);
for (i = 0; i < dev->n_args; i++) {
- if (strcmp(dev->args[i].key, new_args[i]->name) ||
+ if (strcmp(dev->args[i].key, new_args[i]->name) ||
strcmp(dev->args[i].value, new_args[i]->data)) {
result = false;
goto finish;
return EINVAL;
}
- error = netdev_dev->netdev_class->open(netdev_dev, options->ethertype,
+ error = netdev_dev->netdev_class->open(netdev_dev, options->ethertype,
netdevp);
if (!error) {
int error;
error = (netdev_get_dev(netdev)->netdev_class->get_in4
- ? netdev_get_dev(netdev)->netdev_class->get_in4(netdev,
+ ? netdev_get_dev(netdev)->netdev_class->get_in4(netdev,
&address, &netmask)
: EOPNOTSUPP);
if (address_) {
int error;
error = (netdev_get_dev(netdev)->netdev_class->get_in6
- ? netdev_get_dev(netdev)->netdev_class->get_in6(netdev,
+ ? netdev_get_dev(netdev)->netdev_class->get_in6(netdev,
in6 ? in6 : &dummy)
: EOPNOTSUPP);
if (error && in6) {
enum netdev_flags old_flags;
int error;
- error = netdev_get_dev(netdev)->netdev_class->update_flags(netdev,
+ error = netdev_get_dev(netdev)->netdev_class->update_flags(netdev,
off & ~on, on, &old_flags);
if (error) {
VLOG_WARN_RL(&rl, "failed to %s flags for network device %s: %s",
uint32_t ip, uint8_t mac[ETH_ADDR_LEN])
{
int error = (netdev_get_dev(netdev)->netdev_class->arp_lookup
- ? netdev_get_dev(netdev)->netdev_class->arp_lookup(netdev,
+ ? netdev_get_dev(netdev)->netdev_class->arp_lookup(netdev,
ip, mac)
: EOPNOTSUPP);
if (error) {
netdev_get_carrier(const struct netdev *netdev, bool *carrier)
{
int error = (netdev_get_dev(netdev)->netdev_class->get_carrier
- ? netdev_get_dev(netdev)->netdev_class->get_carrier(netdev,
+ ? netdev_get_dev(netdev)->netdev_class->get_carrier(netdev,
carrier)
: EOPNOTSUPP);
if (error) {
uint32_t kbits_burst)
{
return (netdev_get_dev(netdev)->netdev_class->set_policing
- ? netdev_get_dev(netdev)->netdev_class->set_policing(netdev,
+ ? netdev_get_dev(netdev)->netdev_class->set_policing(netdev,
kbits_rate, kbits_burst)
: EOPNOTSUPP);
}
netdev_get_vlan_vid(const struct netdev *netdev, int *vlan_vid)
{
int error = (netdev_get_dev(netdev)->netdev_class->get_vlan_vid
- ? netdev_get_dev(netdev)->netdev_class->get_vlan_vid(netdev,
+ ? netdev_get_dev(netdev)->netdev_class->get_vlan_vid(netdev,
vlan_vid)
: ENOENT);
if (error) {
}
-/* Returns the class type of 'netdev'.
+/* Returns the class type of 'netdev'.
*
* The caller must not free the returned value. */
const char *
};
/* Next nlmsghdr sequence number.
- *
+ *
* This implementation uses sequence numbers that are unique process-wide, to
* avoid a hypothetical race: send request, close socket, open new socket that
* reuses the old socket's PID value, send request on new socket, receive reply
/* Destroys netlink socket 'sock'. */
void
-nl_sock_destroy(struct nl_sock *sock)
+nl_sock_destroy(struct nl_sock *sock)
{
if (sock) {
close(sock->fd);
* 'wait' is true, then the send will wait until buffer space is ready;
* otherwise, returns EAGAIN if the 'sock' send buffer is full. */
int
-nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
+nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
{
struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);
int error;
* returns EAGAIN if the 'sock' send buffer is full. */
int
nl_sock_sendv(struct nl_sock *sock, const struct iovec iov[], size_t n_iov,
- bool wait)
+ bool wait)
{
struct msghdr msg;
int error;
* If 'wait' is true, nl_sock_recv waits for a message to be ready; otherwise,
* returns EAGAIN if the 'sock' receive buffer is empty. */
int
-nl_sock_recv(struct nl_sock *sock, struct ofpbuf **bufp, bool wait)
+nl_sock_recv(struct nl_sock *sock, struct ofpbuf **bufp, bool wait)
{
uint8_t tmp;
ssize_t bufsize = 2048;
try_again:
/* Attempt to read the message. We don't know the size of the data
* yet, so we take a guess at 2048. If we're wrong, we keep trying
- * and doubling the buffer size each time.
+ * and doubling the buffer size each time.
*/
nlmsghdr = ofpbuf_put_uninit(buf, bufsize);
iov.iov_base = nlmsghdr;
iov.iov_len = bufsize;
do {
- nbytes = recvmsg(sock->fd, &msg, (wait ? 0 : MSG_DONTWAIT) | MSG_PEEK);
+ nbytes = recvmsg(sock->fd, &msg, (wait ? 0 : MSG_DONTWAIT) | MSG_PEEK);
} while (nbytes < 0 && errno == EINTR);
if (nbytes < 0) {
ofpbuf_delete(buf);
*
* Bare Netlink is an unreliable transport protocol. This function layers
* reliable delivery and reply semantics on top of bare Netlink.
- *
+ *
* In Netlink, sending a request to the kernel is reliable enough, because the
* kernel will tell us if the message cannot be queued (and we will in that
* case put it on the transmit queue and wait until it can be delivered).
- *
+ *
* Receiving the reply is the real problem: if the socket buffer is full when
* the kernel tries to send the reply, the reply will be dropped. However, the
* kernel sets a flag that a reply has been dropped. The next call to recv
*/
int
nl_sock_transact(struct nl_sock *sock,
- const struct ofpbuf *request, struct ofpbuf **replyp)
+ const struct ofpbuf *request, struct ofpbuf **replyp)
{
uint32_t seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
struct nlmsghdr *nlmsghdr;
/* Ensure that we get a reply even if this message doesn't ordinarily call
* for one. */
nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_ACK;
-
+
send:
retval = nl_sock_send(sock, request, true);
if (retval) {
*
* 'msg' must be at least as large as a nlmsghdr. */
struct nlmsghdr *
-nl_msg_nlmsghdr(const struct ofpbuf *msg)
+nl_msg_nlmsghdr(const struct ofpbuf *msg)
{
return ofpbuf_at_assert(msg, 0, NLMSG_HDRLEN);
}
* Returns a null pointer if 'msg' is not large enough to contain an nlmsghdr
* and a genlmsghdr. */
struct genlmsghdr *
-nl_msg_genlmsghdr(const struct ofpbuf *msg)
+nl_msg_genlmsghdr(const struct ofpbuf *msg)
{
return ofpbuf_at(msg, NLMSG_HDRLEN, GENL_HDRLEN);
}
*
* 'msg' must be at least as large as a nlmsghdr. */
bool
-nl_msg_nlmsgerr(const struct ofpbuf *msg, int *errorp)
+nl_msg_nlmsgerr(const struct ofpbuf *msg, int *errorp)
{
if (nl_msg_nlmsghdr(msg)->nlmsg_type == NLMSG_ERROR) {
struct nlmsgerr *err = ofpbuf_at(msg, NLMSG_HDRLEN, sizeof *err);
/* Ensures that 'b' has room for at least 'size' bytes plus netlink padding at
* its tail end, reallocating and copying its data if necessary. */
void
-nl_msg_reserve(struct ofpbuf *msg, size_t size)
+nl_msg_reserve(struct ofpbuf *msg, size_t size)
{
ofpbuf_prealloc_tailroom(msg, NLMSG_ALIGN(size));
}
* message. */
void
nl_msg_put_nlmsghdr(struct ofpbuf *msg,
- size_t expected_payload, uint32_t type, uint32_t flags)
+ size_t expected_payload, uint32_t type, uint32_t flags)
{
struct nlmsghdr *nlmsghdr;
* the tail end of 'msg'. Data in 'msg' is reallocated and copied if
* necessary. */
void
-nl_msg_put(struct ofpbuf *msg, const void *data, size_t size)
+nl_msg_put(struct ofpbuf *msg, const void *data, size_t size)
{
memcpy(nl_msg_put_uninit(msg, size), data, size);
}
* end of 'msg', reallocating and copying its data if necessary. Returns a
* pointer to the first byte of the new data, which is left uninitialized. */
void *
-nl_msg_put_uninit(struct ofpbuf *msg, size_t size)
+nl_msg_put_uninit(struct ofpbuf *msg, size_t size)
{
size_t pad = NLMSG_ALIGN(size) - size;
char *p = ofpbuf_put_uninit(msg, size + pad);
if (pad) {
- memset(p + size, 0, pad);
+ memset(p + size, 0, pad);
}
return p;
}
* 'msg', reallocating and copying its data if necessary. Returns a pointer to
* the first byte of data in the attribute, which is left uninitialized. */
void *
-nl_msg_put_unspec_uninit(struct ofpbuf *msg, uint16_t type, size_t size)
+nl_msg_put_unspec_uninit(struct ofpbuf *msg, uint16_t type, size_t size)
{
size_t total_size = NLA_HDRLEN + size;
struct nlattr* nla = nl_msg_put_uninit(msg, total_size);
* attribute, which is left uninitialized. */
void
nl_msg_put_unspec(struct ofpbuf *msg, uint16_t type,
- const void *data, size_t size)
+ const void *data, size_t size)
{
memcpy(nl_msg_put_unspec_uninit(msg, type, size), data, size);
}
* (Some Netlink protocols use the presence or absence of an attribute as a
* Boolean flag.) */
void
-nl_msg_put_flag(struct ofpbuf *msg, uint16_t type)
+nl_msg_put_flag(struct ofpbuf *msg, uint16_t type)
{
nl_msg_put_unspec(msg, type, NULL, 0);
}
/* Appends a Netlink attribute of the given 'type' and the given 8-bit 'value'
* to 'msg'. */
void
-nl_msg_put_u8(struct ofpbuf *msg, uint16_t type, uint8_t value)
+nl_msg_put_u8(struct ofpbuf *msg, uint16_t type, uint8_t value)
{
nl_msg_put_unspec(msg, type, &value, sizeof value);
}
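/* Illustrative sketch: composing a request with the nl_msg_*() helpers above
 * and handing it to nl_sock_transact(), which layers reliable request/reply
 * semantics over bare Netlink (see the comment on nl_sock_transact()).
 * ofpbuf_init() is assumed to be available from ofpbuf.h; the message type
 * 0x10 and attribute number 1 are made-up placeholders. */
static int
transact_sketch(struct nl_sock *sock, struct ofpbuf **replyp)
{
    struct ofpbuf request;
    int error;

    ofpbuf_init(&request, 0);                     /* assumed constructor */
    nl_msg_put_nlmsghdr(&request, 0, 0x10, NLM_F_REQUEST);
    nl_msg_put_u8(&request, 1, 42);               /* hypothetical attribute */

    error = nl_sock_transact(sock, &request, replyp);
    ofpbuf_uninit(&request);
    return error;                                 /* 0 or a positive errno value */
}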
/* Returns the first byte in the payload of attribute 'nla'. */
const void *
-nl_attr_get(const struct nlattr *nla)
+nl_attr_get(const struct nlattr *nla)
{
assert(nla->nla_len >= NLA_HDRLEN);
return nla + 1;
/* Returns the number of bytes in the payload of attribute 'nla'. */
size_t
-nl_attr_get_size(const struct nlattr *nla)
+nl_attr_get_size(const struct nlattr *nla)
{
assert(nla->nla_len >= NLA_HDRLEN);
return nla->nla_len - NLA_HDRLEN;
/* Asserts that 'nla''s payload is at least 'size' bytes long, and returns the
* first byte of the payload. */
const void *
-nl_attr_get_unspec(const struct nlattr *nla, size_t size)
+nl_attr_get_unspec(const struct nlattr *nla, size_t size)
{
assert(nla->nla_len >= NLA_HDRLEN + size);
return nla + 1;
/* Returns true if 'nla' is nonnull. (Some Netlink protocols use the presence
* or absence of an attribute as a Boolean flag.) */
bool
-nl_attr_get_flag(const struct nlattr *nla)
+nl_attr_get_flag(const struct nlattr *nla)
{
return nla != NULL;
}
*
* Asserts that 'nla''s payload is at least 1 byte long. */
uint8_t
-nl_attr_get_u8(const struct nlattr *nla)
+nl_attr_get_u8(const struct nlattr *nla)
{
return NL_ATTR_GET_AS(nla, uint8_t);
}
*
* Asserts that 'nla''s payload is at least 2 bytes long. */
uint16_t
-nl_attr_get_u16(const struct nlattr *nla)
+nl_attr_get_u16(const struct nlattr *nla)
{
return NL_ATTR_GET_AS(nla, uint16_t);
}
*
* Asserts that 'nla''s payload is at least 4 bytes long. */
uint32_t
-nl_attr_get_u32(const struct nlattr *nla)
+nl_attr_get_u32(const struct nlattr *nla)
{
return NL_ATTR_GET_AS(nla, uint32_t);
}
*
* Asserts that 'nla''s payload is at least 8 bytes long. */
uint64_t
-nl_attr_get_u64(const struct nlattr *nla)
+nl_attr_get_u64(const struct nlattr *nla)
{
return NL_ATTR_GET_AS(nla, uint64_t);
}
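/* Illustrative sketch: reading typed values back out of a parsed attribute
 * array with the nl_attr_get_*() accessors above.  How attrs[] is filled in
 * (the tree's Netlink policy parser) is out of scope here, and the attribute
 * indexes 1..3 are made-up placeholders. */
static uint64_t
read_attrs_sketch(const struct nlattr *attrs[])
{
    uint64_t total = 0;

    if (nl_attr_get_flag(attrs[1])) {             /* presence used as a bool */
        total += nl_attr_get_u32(attrs[2]);       /* asserts a 4-byte payload */
        total += nl_attr_get_u64(attrs[3]);       /* asserts an 8-byte payload */
    }
    return total;
}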
*
* Asserts that 'nla''s payload contains a null-terminated string. */
const char *
-nl_attr_get_string(const struct nlattr *nla)
+nl_attr_get_string(const struct nlattr *nla)
{
assert(nla->nla_len > NLA_HDRLEN);
assert(memchr(nl_attr_get(nla), '\0', nla->nla_len - NLA_HDRLEN) != NULL);
\f
/* Miscellaneous. */
-static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
+static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
[CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
};
-static int do_lookup_genl_family(const char *name)
+static int do_lookup_genl_family(const char *name)
{
struct nl_sock *sock;
struct ofpbuf request, *reply;
* may use '*number' as the family number. On failure, returns a positive
* errno value and '*number' caches the errno value. */
int
-nl_lookup_genl_family(const char *name, int *number)
+nl_lookup_genl_family(const char *name, int *number)
{
if (*number == 0) {
*number = do_lookup_genl_family(name);
unsigned int bits;
const char *name;
};
- static const struct nlmsg_flag flags[] = {
+ static const struct nlmsg_flag flags[] = {
{ NLM_F_REQUEST, "REQUEST" },
{ NLM_F_MULTI, "MULTI" },
{ NLM_F_ACK, "ACK" },
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
- */
-
+ */
+
/* OpenFlow protocol string to flow parser. */
#ifndef OFP_PARSE_H
struct ofp_match;
struct ofpbuf;
-void parse_ofp_str(char *string, struct ofp_match *match,
- struct ofpbuf *actions, uint8_t *table_idx,
- uint16_t *out_port, uint16_t *priority,
- uint16_t *idle_timeout, uint16_t *hard_timeout,
+void parse_ofp_str(char *string, struct ofp_match *match,
+ struct ofpbuf *actions, uint8_t *table_idx,
+ uint16_t *out_port, uint16_t *priority,
+ uint16_t *idle_timeout, uint16_t *hard_timeout,
uint64_t *cookie);
#endif /* ofp-parse.h */
if (WEXITSTATUS(status))
ovs_error(0, "tcpdump exited with status %d", WEXITSTATUS(status));
} else if (WIFSIGNALED(status)) {
- ovs_error(0, "tcpdump exited with signal %d", WTERMSIG(status));
+ ovs_error(0, "tcpdump exited with signal %d", WTERMSIG(status));
}
return ds_cstr(&ds);
}
}
if (verbosity > 1) {
char *packet = ofp_packet_to_string(op->data, data_len,
- ntohs(op->total_len));
+ ntohs(op->total_len));
ds_put_cstr(string, packet);
free(packet);
}
}
-static void ofp_print_port_name(struct ds *string, uint16_t port)
+static void ofp_print_port_name(struct ds *string, uint16_t port)
{
const char *name;
switch (port) {
}
static int
-ofp_print_action(struct ds *string, const struct ofp_action_header *ah,
- size_t actions_len)
+ofp_print_action(struct ds *string, const struct ofp_action_header *ah,
+ size_t actions_len)
{
uint16_t type;
size_t len;
}
if ((len % 8) != 0) {
- ds_put_format(string,
+ ds_put_format(string,
"***action %"PRIu16" length not a multiple of 8***\n",
type);
return -1;
if (type < ARRAY_SIZE(of_actions)) {
const struct openflow_action *act = &of_actions[type];
if ((len < act->min_size) || (len > act->max_size)) {
- ds_put_format(string,
+ ds_put_format(string,
"***action %"PRIu16" wrong length: %zu***\n", type, len);
return -1;
}
}
-
+
switch (type) {
case OFPAT_OUTPUT: {
struct ofp_action_output *oa = (struct ofp_action_output *)ah;
- uint16_t port = ntohs(oa->port);
+ uint16_t port = ntohs(oa->port);
if (port < OFPP_MAX) {
ds_put_format(string, "output:%"PRIu16, port);
} else {
case OFPAT_SET_DL_SRC: {
struct ofp_action_dl_addr *da = (struct ofp_action_dl_addr *)ah;
- ds_put_format(string, "mod_dl_src:"ETH_ADDR_FMT,
+ ds_put_format(string, "mod_dl_src:"ETH_ADDR_FMT,
ETH_ADDR_ARGS(da->dl_addr));
break;
}
case OFPAT_SET_DL_DST: {
struct ofp_action_dl_addr *da = (struct ofp_action_dl_addr *)ah;
- ds_put_format(string, "mod_dl_dst:"ETH_ADDR_FMT,
+ ds_put_format(string, "mod_dl_dst:"ETH_ADDR_FMT,
ETH_ADDR_ARGS(da->dl_addr));
break;
}
}
case OFPAT_VENDOR: {
- struct ofp_action_vendor_header *avh
+ struct ofp_action_vendor_header *avh
= (struct ofp_action_vendor_header *)ah;
if (len < sizeof *avh) {
ds_put_format(string, "***ofpat_vendor truncated***\n");
return len;
}
-void
+void
ofp_print_actions(struct ds *string, const struct ofp_action_header *action,
- size_t actions_len)
+ size_t actions_len)
{
uint8_t *p = (uint8_t *)action;
int len = 0;
if (len) {
ds_put_cstr(string, ",");
}
- len = ofp_print_action(string, (struct ofp_action_header *)p,
+ len = ofp_print_action(string, (struct ofp_action_header *)p,
actions_len);
if (len < 0) {
return;
/* Pretty-print the OFPT_PACKET_OUT packet of 'len' bytes at 'oh' to 'string'
* at the given 'verbosity' level. */
static void ofp_packet_out(struct ds *string, const void *oh, size_t len,
- int verbosity)
+ int verbosity)
{
const struct ofp_packet_out *opo = oh;
size_t actions_len = ntohs(opo->actions_len);
int n_ports;
int i;
- ds_put_format(string, " ver:0x%x, dpid:%016"PRIx64"\n",
+ ds_put_format(string, " ver:0x%x, dpid:%016"PRIx64"\n",
osf->header.version, ntohll(osf->datapath_id));
ds_put_format(string, "n_tables:%d, n_buffers:%d\n", osf->n_tables,
ntohl(osf->n_buffers));
}
n_ports = (len - sizeof *osf) / sizeof *osf->ports;
- port_list = xmemdup(osf->ports, len - sizeof *osf);
+ port_list = xmemdup(osf->ports, len - sizeof *osf);
qsort(port_list, n_ports, sizeof *port_list, compare_ports);
for (i = 0; i < n_ports; i++) {
ofp_print_phy_port(string, &port_list[i]);
}
static void print_wild(struct ds *string, const char *leader, int is_wild,
- int verbosity, const char *format, ...)
+ int verbosity, const char *format, ...)
__attribute__((format(printf, 5, 6)));
static void print_wild(struct ds *string, const char *leader, int is_wild,
- int verbosity, const char *format, ...)
+ int verbosity, const char *format, ...)
{
if (is_wild && verbosity < 2) {
return;
/* Pretty-print the OFPT_FLOW_MOD packet of 'len' bytes at 'oh' to 'string'
* at the given 'verbosity' level. */
static void
-ofp_print_flow_mod(struct ds *string, const void *oh, size_t len,
+ofp_print_flow_mod(struct ds *string, const void *oh, size_t len,
int verbosity)
{
const struct ofp_flow_mod *ofm = oh;
ds_put_format(string, " cmd:%d ", ntohs(ofm->command));
}
ds_put_format(string, "cookie:0x%"PRIx64" idle:%d hard:%d pri:%d "
- "buf:%#x flags:%"PRIx16" ", ntohll(ofm->cookie),
+ "buf:%#x flags:%"PRIx16" ", ntohll(ofm->cookie),
ntohs(ofm->idle_timeout), ntohs(ofm->hard_timeout),
ofm->match.wildcards ? ntohs(ofm->priority) : (uint16_t)-1,
ntohl(ofm->buffer_id), ntohs(ofm->flags));
/* Pretty-print the OFPT_FLOW_REMOVED packet of 'len' bytes at 'oh' to 'string'
* at the given 'verbosity' level. */
static void
-ofp_print_flow_removed(struct ds *string, const void *oh,
+ofp_print_flow_removed(struct ds *string, const void *oh,
size_t len OVS_UNUSED, int verbosity)
{
const struct ofp_flow_removed *ofr = oh;
ds_put_format(string, "**%"PRIu8"**", ofr->reason);
break;
}
- ds_put_format(string,
+ ds_put_format(string,
" cookie0x%"PRIx64" pri%"PRIu16" secs%"PRIu32" nsecs%"PRIu32
- " idle%"PRIu16" pkts%"PRIu64" bytes%"PRIu64"\n",
+ " idle%"PRIu16" pkts%"PRIu64" bytes%"PRIu64"\n",
ntohll(ofr->cookie),
ofr->match.wildcards ? ntohs(ofr->priority) : (uint16_t)-1,
ntohl(ofr->duration_sec), ntohl(ofr->duration_nsec),
- ntohs(ofr->idle_timeout), ntohll(ofr->packet_count),
+ ntohs(ofr->idle_timeout), ntohll(ofr->packet_count),
ntohll(ofr->byte_count));
}
const struct ofp_port_mod *opm = oh;
ds_put_format(string, "port: %d: addr:"ETH_ADDR_FMT", config: %#x, mask:%#x\n",
- ntohs(opm->port_no), ETH_ADDR_ARGS(opm->hw_addr),
+ ntohs(opm->port_no), ETH_ADDR_ARGS(opm->hw_addr),
ntohl(opm->config), ntohl(opm->mask));
ds_put_format(string, " advertise: ");
if (opm->advertise) {
/* Pretty-print the OFPT_ERROR packet of 'len' bytes at 'oh' to 'string'
* at the given 'verbosity' level. */
static void
-ofp_print_error_msg(struct ds *string, const void *oh, size_t len,
+ofp_print_error_msg(struct ds *string, const void *oh, size_t len,
int verbosity OVS_UNUSED)
{
const struct ofp_error_msg *oem = oh;
{
const struct ofp_desc_stats *ods = body;
- ds_put_format(string, "Manufacturer: %.*s\n",
+ ds_put_format(string, "Manufacturer: %.*s\n",
(int) sizeof ods->mfr_desc, ods->mfr_desc);
ds_put_format(string, "Hardware: %.*s\n",
(int) sizeof ods->hw_desc, ods->hw_desc);
}
ds_put_format(string, " cookie=0x%"PRIx64", ", ntohll(fs->cookie));
- ds_put_format(string, "duration_sec=%"PRIu32"s, ",
+ ds_put_format(string, "duration_sec=%"PRIu32"s, ",
ntohl(fs->duration_sec));
- ds_put_format(string, "duration_nsec=%"PRIu32"ns, ",
+ ds_put_format(string, "duration_nsec=%"PRIu32"ns, ",
ntohl(fs->duration_nsec));
ds_put_format(string, "table_id=%"PRIu8", ", fs->table_id);
- ds_put_format(string, "priority=%"PRIu16", ",
+ ds_put_format(string, "priority=%"PRIu16", ",
fs->match.wildcards ? ntohs(fs->priority) : (uint16_t)-1);
ds_put_format(string, "n_packets=%"PRIu64", ",
ntohll(fs->packet_count));
ds_put_format(string, " flow_count=%"PRIu32, ntohl(asr->flow_count));
}
-static void print_port_stat(struct ds *string, const char *leader,
+static void print_port_stat(struct ds *string, const char *leader,
uint64_t stat, int more)
{
ds_put_cstr(string, leader);
ds_put_format(string, "max=%6"PRIu32", ", ntohl(ts->max_entries));
ds_put_format(string, "active=%"PRIu32"\n", ntohl(ts->active_count));
ds_put_cstr(string, " ");
- ds_put_format(string, "lookup=%"PRIu64", ",
+ ds_put_format(string, "lookup=%"PRIu64", ",
ntohll(ts->lookup_count));
ds_put_format(string, "matched=%"PRIu64"\n",
ntohll(ts->matched_count));
{
OFPST_PORT,
"port",
- { sizeof(struct ofp_port_stats_request),
- sizeof(struct ofp_port_stats_request),
+ { sizeof(struct ofp_port_stats_request),
+ sizeof(struct ofp_port_stats_request),
ofp_port_stats_request },
{ 0, SIZE_MAX, ofp_port_stats_reply },
},
ds_put_format(string, " %zu bytes of payload\n", len - sizeof *hdr);
if (verbosity > 1) {
- ds_put_hex_dump(string, hdr, len - sizeof *hdr, 0, true);
+ ds_put_hex_dump(string, hdr, len - sizeof *hdr, 0, true);
}
}
} else if (!pkt->printer) {
if (len > sizeof *oh) {
ds_put_format(&string, " length=%"PRIu16" (decoder not implemented)\n",
- ntohs(oh->length));
+ ntohs(oh->length));
}
} else {
pkt->printer(&string, oh, len, verbosity);
}
\f
static void
-print_and_free(FILE *stream, char *string)
+print_and_free(FILE *stream, char *string)
{
fputs(string, stream);
free(string);
/* Updates the 'length' field of the OpenFlow message in 'buffer' to
* 'buffer->size'. */
void
-update_openflow_length(struct ofpbuf *buffer)
+update_openflow_length(struct ofpbuf *buffer)
{
struct ofp_header *oh = ofpbuf_at_assert(buffer, 0, sizeof *oh);
- oh->length = htons(buffer->size);
+ oh->length = htons(buffer->size);
}
struct ofpbuf *
}
static int
-check_message_type(uint8_t got_type, uint8_t want_type)
+check_message_type(uint8_t got_type, uint8_t want_type)
{
if (got_type != want_type) {
char *want_type_name = ofp_message_type_to_string(want_type);
/* Frees memory that 'b' points to. */
void
-ofpbuf_uninit(struct ofpbuf *b)
+ofpbuf_uninit(struct ofpbuf *b)
{
if (b) {
free(b->base);
/* Frees memory that 'b' points to, as well as 'b' itself. */
void
-ofpbuf_delete(struct ofpbuf *b)
+ofpbuf_delete(struct ofpbuf *b)
{
if (b) {
ofpbuf_uninit(b);
* reallocating and copying its data if necessary. Its headroom, if any, is
* preserved. */
void
-ofpbuf_prealloc_tailroom(struct ofpbuf *b, size_t size)
+ofpbuf_prealloc_tailroom(struct ofpbuf *b, size_t size)
{
if (size > ofpbuf_tailroom(b)) {
ofpbuf_resize_tailroom__(b, MAX(size, 64));
}
void
-ofpbuf_prealloc_headroom(struct ofpbuf *b, size_t size)
+ofpbuf_prealloc_headroom(struct ofpbuf *b, size_t size)
{
assert(size <= ofpbuf_headroom(b));
}
* copying its data if necessary. Returns a pointer to the first byte of the
* new data, which is left uninitialized. */
void *
-ofpbuf_put_uninit(struct ofpbuf *b, size_t size)
+ofpbuf_put_uninit(struct ofpbuf *b, size_t size)
{
void *p;
ofpbuf_prealloc_tailroom(b, size);
* is reallocated and copied if necessary. Returns a pointer to the first
* byte of the data's location in the ofpbuf. */
void *
-ofpbuf_put(struct ofpbuf *b, const void *p, size_t size)
+ofpbuf_put(struct ofpbuf *b, const void *p, size_t size)
{
void *dst = ofpbuf_put_uninit(b, size);
memcpy(dst, p, size);
/* Reserves 'size' bytes of headroom so that they can be later allocated with
* ofpbuf_push_uninit() without reallocating the ofpbuf. */
void
-ofpbuf_reserve(struct ofpbuf *b, size_t size)
+ofpbuf_reserve(struct ofpbuf *b, size_t size)
{
assert(!b->size);
ofpbuf_prealloc_tailroom(b, size);
}
void *
-ofpbuf_push_uninit(struct ofpbuf *b, size_t size)
+ofpbuf_push_uninit(struct ofpbuf *b, size_t size)
{
ofpbuf_prealloc_headroom(b, size);
b->data = (char*)b->data - size;
}
void *
-ofpbuf_push(struct ofpbuf *b, const void *p, size_t size)
+ofpbuf_push(struct ofpbuf *b, const void *p, size_t size)
{
void *dst = ofpbuf_push_uninit(b, size);
memcpy(dst, p, size);
/* If 'b' contains at least 'offset + size' bytes of data, returns a pointer to
* byte 'offset'. Otherwise, returns a null pointer. */
void *
-ofpbuf_at(const struct ofpbuf *b, size_t offset, size_t size)
+ofpbuf_at(const struct ofpbuf *b, size_t offset, size_t size)
{
return offset + size <= b->size ? (char *) b->data + offset : NULL;
}
/* Returns a pointer to byte 'offset' in 'b', which must contain at least
* 'offset + size' bytes of data. */
void *
-ofpbuf_at_assert(const struct ofpbuf *b, size_t offset, size_t size)
+ofpbuf_at_assert(const struct ofpbuf *b, size_t offset, size_t size)
{
assert(offset + size <= b->size);
return ((char *) b->data) + offset;
/* Returns the byte following the last byte of data in use in 'b'. */
void *
-ofpbuf_tail(const struct ofpbuf *b)
+ofpbuf_tail(const struct ofpbuf *b)
{
return (char *) b->data + b->size;
}
/* Returns the byte following the last byte allocated for use (but not
* necessarily in use) by 'b'. */
void *
-ofpbuf_end(const struct ofpbuf *b)
+ofpbuf_end(const struct ofpbuf *b)
{
return (char *) b->base + b->allocated;
}
/* Clears any data from 'b'. */
void
-ofpbuf_clear(struct ofpbuf *b)
+ofpbuf_clear(struct ofpbuf *b)
{
b->data = b->base;
b->size = 0;
/* Removes 'size' bytes from the head end of 'b', which must contain at least
* 'size' bytes of data. Returns the first byte of data removed. */
void *
-ofpbuf_pull(struct ofpbuf *b, size_t size)
+ofpbuf_pull(struct ofpbuf *b, size_t size)
{
void *data = b->data;
assert(b->size >= size);
* head end of 'b' and returns the first byte removed. Otherwise, returns a
* null pointer without modifying 'b'. */
void *
-ofpbuf_try_pull(struct ofpbuf *b, size_t size)
+ofpbuf_try_pull(struct ofpbuf *b, size_t size)
{
return b->size >= size ? ofpbuf_pull(b, size) : NULL;
}
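/* Illustrative sketch: typical use of the helpers above -- append data with
 * ofpbuf_put(), then consume it from the head with ofpbuf_try_pull().
 * ofpbuf_init() is assumed to be available elsewhere in ofpbuf.h. */
static void
ofpbuf_usage_sketch(void)
{
    struct ofpbuf b;
    uint32_t value = 0x12345678;
    uint32_t *head;

    ofpbuf_init(&b, 64);                          /* assumed constructor */
    ofpbuf_put(&b, &value, sizeof value);         /* copies 4 bytes to the tail */
    head = ofpbuf_try_pull(&b, sizeof *head);     /* null if under 4 bytes */
    if (head) {
        /* *head == 0x12345678 and b.size is back to 0. */
    }
    ofpbuf_uninit(&b);
}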
{
return ea[0] & 1;
}
-static inline bool eth_addr_is_local(const uint8_t ea[6])
+static inline bool eth_addr_is_local(const uint8_t ea[6])
{
/* Local if it is either a locally administered address or a Nicira random
* address. */
return !!(ea[0] & 2)
|| (ea[0] == 0x00 && ea[1] == 0x23 && ea[2] == 0x20 && !!(ea[3] & 0x80));
}
-static inline bool eth_addr_is_zero(const uint8_t ea[6])
+static inline bool eth_addr_is_zero(const uint8_t ea[6])
{
return !(ea[0] | ea[1] | ea[2] | ea[3] | ea[4] | ea[5]);
}
static inline bool eth_addr_equals(const uint8_t a[ETH_ADDR_LEN],
- const uint8_t b[ETH_ADDR_LEN])
+ const uint8_t b[ETH_ADDR_LEN])
{
return !memcmp(a, b, ETH_ADDR_LEN);
}
/* Returns the IP address of the peer, or 0 if the peer's IP address is not
* known. */
uint32_t
-rconn_get_remote_ip(const struct rconn *rconn)
+rconn_get_remote_ip(const struct rconn *rconn)
{
return rconn->remote_ip;
}
/* Returns the transport port of the peer, or 0 if the peer's port is not
* known. */
uint16_t
-rconn_get_remote_port(const struct rconn *rconn)
+rconn_get_remote_port(const struct rconn *rconn)
{
return rconn->remote_port;
}
/* Returns the IP address used to connect to the peer, or 0 if the
- * connection is not an IP-based protocol or if its IP address is not
+ * connection is not an IP-based protocol or if its IP address is not
* known. */
uint32_t
-rconn_get_local_ip(const struct rconn *rconn)
+rconn_get_local_ip(const struct rconn *rconn)
{
return rconn->local_ip;
}
/* Returns the transport port used to connect to the peer, or 0 if the
* connection does not contain a port or if the port is not known. */
uint16_t
-rconn_get_local_port(const struct rconn *rconn)
+rconn_get_local_port(const struct rconn *rconn)
{
return rconn->vconn ? vconn_get_local_port(rconn->vconn) : 0;
}
}
static void
-question_connectivity(struct rconn *rc)
+question_connectivity(struct rconn *rc)
{
time_t now = time_now();
if (now - rc->last_questioned > 60) {
}
static bool
-is_connected_state(enum state state)
+is_connected_state(enum state state)
{
return (state & (S_ACTIVE | S_IDLE)) != 0;
}
/* decoded ethernet header */
typedef struct _SFLSampled_ethernet {
- u_int32_t eth_len; /* The length of the MAC packet excluding
+ u_int32_t eth_len; /* The length of the MAC packet excluding
lower layer encapsulations */
u_int8_t src_mac[8]; /* 6 bytes + 2 pad */
u_int8_t dst_mac[8];
SFLEXTENDED_AS_SET = 1, /* Unordered set of ASs */
SFLEXTENDED_AS_SEQUENCE = 2 /* Ordered sequence of ASs */
};
-
+
typedef struct _SFLExtended_as_path_segment {
u_int32_t type; /* enum SFLExtended_as_path_segment_type */
u_int32_t length; /* number of AS numbers in set/sequence */
} SFLLabelStack;
typedef struct _SFLExtended_mpls {
- SFLAddress nextHop; /* Address of the next hop */
+ SFLAddress nextHop; /* Address of the next hop */
SFLLabelStack in_stack;
SFLLabelStack out_stack;
} SFLExtended_mpls;
/* Extended NAT data
Packet header records report addresses as seen at the sFlowDataSource.
The extended_nat structure reports on translated source and/or destination
- addesses for this packet. If an address was not translated it should
+ addresses for this packet. If an address was not translated it should
be equal to that reported for the header. */
typedef struct _SFLExtended_nat {
u_int32_t mplsFecAddrPrefixLength;
} SFLExtended_mpls_LDP_FEC;
-/* Extended VLAN tunnel information
- Record outer VLAN encapsulations that have
- been stripped. extended_vlantunnel information
- should only be reported if all the following conditions are satisfied:
- 1. The packet has nested vlan tags, AND
- 2. The reporting device is VLAN aware, AND
- 3. One or more VLAN tags have been stripped, either
- because they represent proprietary encapsulations, or
- because switch hardware automatically strips the outer VLAN
- encapsulation.
- Reporting extended_vlantunnel information is not a substitute for
- reporting extended_switch information. extended_switch data must
- always be reported to describe the ingress/egress VLAN information
- for the packet. The extended_vlantunnel information only applies to
- nested VLAN tags, and then only when one or more tags has been
- stripped. */
+/* Extended VLAN tunnel information
+ Record outer VLAN encapsulations that have
+ been stripped. extended_vlantunnel information
+ should only be reported if all the following conditions are satisfied:
+ 1. The packet has nested vlan tags, AND
+ 2. The reporting device is VLAN aware, AND
+ 3. One or more VLAN tags have been stripped, either
+ because they represent proprietary encapsulations, or
+ because switch hardware automatically strips the outer VLAN
+ encapsulation.
+ Reporting extended_vlantunnel information is not a substitute for
+ reporting extended_switch information. extended_switch data must
+ always be reported to describe the ingress/egress VLAN information
+ for the packet. The extended_vlantunnel information only applies to
+ nested VLAN tags, and then only when one or more tags has been
+ stripped. */
typedef SFLLabelStack SFLVlanStack;
-typedef struct _SFLExtended_vlan_tunnel {
- SFLVlanStack stack; /* List of stripped 802.1Q TPID/TCI layers. Each
- TPID,TCI pair is represented as a single 32 bit
- integer. Layers listed from outermost to
- innermost. */
+typedef struct _SFLExtended_vlan_tunnel {
+ SFLVlanStack stack; /* List of stripped 802.1Q TPID/TCI layers. Each
+ TPID,TCI pair is represented as a single 32 bit
+ integer. Layers listed from outermost to
+ innermost. */
} SFLExtended_vlan_tunnel;
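/* Illustrative sketch: one plausible way to pack a stripped 802.1Q layer into
 * the single 32-bit TPID/TCI form the comment above describes, assuming the
 * TPID occupies the upper 16 bits.  The surrounding SFLVlanStack would hold
 * one such value per stripped layer, outermost first. */
static u_int32_t
pack_vlan_layer_sketch(u_int16_t tpid, u_int16_t tci)
{
    return ((u_int32_t)tpid << 16) | tci;         /* e.g. 0x8100 << 16 | tci */
}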
enum SFLFlow_type_tag {
SFLFLOW_SAMPLE_EXPANDED = 3, /* enterprise = 0 : format = 3 */
SFLCOUNTERS_SAMPLE_EXPANDED = 4 /* enterprise = 0 : format = 4 */
};
-
+
/* Format of a single flow sample */
typedef struct _SFLFlow_sample {
agent->errorFn = errorFn;
agent->sendFn = sendFn;
-#ifdef SFLOW_DO_SOCKET
+#ifdef SFLOW_DO_SOCKET
if(sendFn == NULL) {
/* open the socket - really need one for v4 and another for v6? */
if((agent->receiverSocket4 = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) == -1)
if(cmp < 0) break; /* insert here */
}
/* either we found the insert point, or reached the end of the list...*/
-
+
{
SFLSampler *newsm = (SFLSampler *)sflAlloc(agent, sizeof(SFLSampler));
sfl_sampler_init(newsm, agent, pdsi);
if(prev) prev->nxt = newsm;
else agent->samplers = newsm;
newsm->nxt = sm;
-
+
/* see if we should go in the ifIndex jumpTable */
if(SFL_DS_CLASS(newsm->dsi) == 0) {
SFLSampler *test = sfl_agent_getSamplerByIfIndex(agent, SFL_DS_INDEX(newsm->dsi));
for(; sm != NULL; sm = sm->nxt)
if(sfl_sampler_get_sFlowFsReceiver(sm) == rcvIdx) sfl_sampler_set_sFlowFsReceiver(sm, 0);
-
+
for(; pl != NULL; pl = pl->nxt)
if(sfl_poller_get_sFlowCpReceiver(pl) == rcvIdx) sfl_poller_set_sFlowCpReceiver(pl, 0);
}
}
}
-
+
/*_________________---------------------------__________________
_________________ sfl_agent_error __________________
-----------------___________________________------------------
#endif
#include "sflow.h"
-
+
/* define SFLOW_SOFTWARE_SAMPLING to 1 if you need to use the
sfl_sampler_takeSample routine and give it every packet */
/* #define SFLOW_SOFTWARE_SAMPLING */
__________________________________
| cls| index | instance |
----------------------------------
-
+
but now is opened up to a 12-byte struct to ensure
that ds_index has a full 32-bit field, and to make
accessing the components simpler. The macros have
/* clear everything */
memset(poller, 0, sizeof(*poller));
-
+
/* restore the linked list ptr */
poller->nxt = nxtPtr;
-
+
/* now copy in the parameters */
poller->agent = agent;
poller->dsi = dsi; /* structure copy */
}
void sfl_receiver_set_sFlowRcvrTimeout(SFLReceiver *receiver, time_t sFlowRcvrTimeout) {
receiver->sFlowRcvrTimeout =sFlowRcvrTimeout;
-}
+}
u_int32_t sfl_receiver_get_sFlowRcvrMaximumDatagramSize(SFLReceiver *receiver) {
return receiver->sFlowRcvrMaximumDatagramSize;
}
_________________ receiver write utilities __________________
-----------------_____________________________------------------
*/
-
+
inline static void put32(SFLReceiver *receiver, u_int32_t val)
{
*receiver->sampleCollector.datap++ = val;
inline static u_int32_t gatewayEncodingLength(SFLExtended_gateway *gw) {
u_int32_t elemSiz = addressEncodingLength(&gw->nexthop);
u_int32_t seg = 0;
- elemSiz += 16; // as, src_as, src_peer_as, dst_as_path_segments
+ elemSiz += 16; // as, src_as, src_peer_as, dst_as_path_segments
for(; seg < gw->dst_as_path_segments; seg++) {
- elemSiz += 8; // type, length
+ elemSiz += 8; // type, length
elemSiz += 4 * gw->dst_as_path[seg].length; // set/seq bytes
}
elemSiz += 4; // communities_length
// it over the limit, then we should send it now before going on.
if((receiver->sampleCollector.pktlen + packedSize) >= receiver->sFlowRcvrMaximumDatagramSize)
sendSample(receiver);
-
+
receiver->sampleCollector.numSamples++;
#ifdef SFL_USE_32BIT_INDEX
{
SFLFlow_sample_element *elem = fs->elements;
for(; elem != NULL; elem = elem->nxt) {
-
+
putNet32(receiver, elem->tag);
putNet32(receiver, elem->length); // length cached in computeFlowSampleSize()
-
+
switch(elem->tag) {
case SFLFLOW_HEADER:
putNet32(receiver, elem->flowType.header.header_protocol);
// if the sample pkt is full enough so that this sample might put
// it over the limit, then we should send it now.
if((packedSize = computeCountersSampleSize(receiver, cs)) == -1) return -1;
-
+
// check in case this one sample alone is too big for the datagram
// in fact - if it is even half as big then we should ditch it. Very
// important to avoid overrunning the packet buffer.
sflError(receiver, "counters sample too big for datagram");
return -1;
}
-
+
if((receiver->sampleCollector.pktlen + packedSize) >= receiver->sFlowRcvrMaximumDatagramSize)
sendSample(receiver);
-
+
receiver->sampleCollector.numSamples++;
-
+
#ifdef SFL_USE_32BIT_INDEX
putNet32(receiver, SFLCOUNTERS_SAMPLE_EXPANDED);
#else
#endif
putNet32(receiver, cs->num_elements);
-
+
{
SFLCounters_sample_element *elem = cs->elements;
for(; elem != NULL; elem = elem->nxt) {
-
+
putNet32(receiver, elem->tag);
putNet32(receiver, elem->length); // length cached in computeCountersSampleSize()
-
+
switch(elem->tag) {
case SFLCOUNTERS_GENERIC:
putGenericCounters(receiver, &(elem->counterBlock.generic));
*/
static void sendSample(SFLReceiver *receiver)
-{
+{
/* construct and send out the sample, then reset for the next one... */
/* first fill in the header with the latest values */
/* version, agent_address and sub_agent_id were pre-set. */
if(receiver->agent->sendFn) (*receiver->agent->sendFn)(receiver->agent->magic,
receiver->agent,
receiver,
- (u_char *)receiver->sampleCollector.data,
+ (u_char *)receiver->sampleCollector.data,
receiver->sampleCollector.pktlen);
else {
#ifdef SFLOW_DO_SOCKET
and to Andy Kitchingman for pointing out that it applies to the hash_nxt ptr too) */
SFLSampler *nxtPtr = sampler->nxt;
SFLSampler *hashPtr = sampler->hash_nxt;
-
+
/* clear everything */
memset(sampler, 0, sizeof(*sampler));
-
+
/* restore the linked list and hash-table ptr */
sampler->nxt = nxtPtr;
sampler->hash_nxt = hashPtr;
-
+
/* now copy in the parameters */
sampler->agent = agent;
sampler->dsi = dsi;
-
+
/* set defaults */
sampler->sFlowFsMaximumHeaderSize = SFL_DEFAULT_HEADER_SIZE;
sampler->sFlowFsPacketSamplingRate = SFL_DEFAULT_SAMPLING_RATE;
{
if(mean == 0 || mean == 1) return 1;
return ((random() % ((2 * mean) - 1)) + 1);
-}
+}
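/* Illustrative sketch: the body above draws the next sampling skip uniformly
 * from [1, 2*mean - 1], so its expected value is 'mean'.  A standalone check
 * of that property (hypothetical name; random() comes from <stdlib.h>): */
static double
mean_skip_sketch(unsigned int mean, int trials)
{
    double sum = 0;
    int i;

    for (i = 0; i < trials; i++) {
        sum += (random() % ((2 * mean) - 1)) + 1; /* assumes mean >= 1, as guarded above */
    }
    return sum / trials;                          /* tends toward 'mean' */
}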
/*_________________---------------------------__________________
_________________ sfl_sampler_takeSample __________________
* address, into a numeric IP address in '*addr'. Returns 0 if successful,
* otherwise a positive errno value. */
int
-lookup_ip(const char *host_name, struct in_addr *addr)
+lookup_ip(const char *host_name, struct in_addr *addr)
{
if (!inet_aton(host_name, addr)) {
struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Returns the error condition associated with socket 'fd' and resets the
* socket's error status. */
int
-get_socket_error(int fd)
+get_socket_error(int fd)
{
int error;
socklen_t len = sizeof(error);
}
int
-check_connection_completion(int fd)
+check_connection_completion(int fd)
{
struct pollfd pfd;
int retval;
/* Returns true if SSL is at least partially configured. */
bool
-stream_ssl_is_configured(void)
+stream_ssl_is_configured(void)
{
return private_key.file_name || certificate.file_name || ca_cert.file_name;
}
/* Define the long options for SSL support.
*
- * Note that the definition includes a final comma, and therefore a comma
- * must not be supplied when using the definition. This is done so that
+ * Note that the definition includes a final comma, and therefore a comma
+ * must not be supplied when using the definition. This is done so that
* compilation succeeds whether or not HAVE_OPENSSL is defined. */
#define STREAM_SSL_LONG_OPTIONS \
{"private-key", required_argument, 0, 'p'}, \
stream_ssl_set_ca_cert_file(optarg, false); \
break;
#else /* !HAVE_OPENSSL */
-static inline bool stream_ssl_is_configured(void)
+static inline bool stream_ssl_is_configured(void)
{
return false;
}
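/* Illustrative sketch: using STREAM_SSL_LONG_OPTIONS in an option table.  As
 * the comment above notes, the macro already supplies its trailing comma, so
 * none is written after it, and the table compiles whether or not
 * HAVE_OPENSSL is defined.  struct option comes from <getopt.h>. */
static const struct option example_long_options[] = {
    {"verbose", optional_argument, 0, 'v'},
    STREAM_SSL_LONG_OPTIONS
    {"help", no_argument, 0, 'h'},
    {0, 0, 0, 0},
};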
}
void
-svec_clear(struct svec *svec)
+svec_clear(struct svec *svec)
{
size_t i;
#include <stdbool.h>
#include <stddef.h>
-
+
#ifdef __cplusplus
extern "C" {
#endif
? (NAME) = (SVEC)->names[INDEX], 1 \
: 0); \
(INDEX)++)
-
+
#ifdef __cplusplus
}
#endif
rusage.ru_nivcsw - last_rusage->ru_nivcsw);
}
- /* Care should be taken in the value chosen for logging. Depending
- * on the configuration, syslog can write changes synchronously,
- * which can cause the coverage messages to take longer to log
+ /* Care should be taken in the value chosen for logging. Depending
+ * on the configuration, syslog can write changes synchronously,
+ * which can cause the coverage messages to take longer to log
* than the processing delay that triggered it. */
coverage_log(VLL_INFO, true);
}
#ifndef UNIXCTL_H
#define UNIXCTL_H 1
-
+
#ifdef __cplusplus
extern "C" {
#endif
unixctl_cb_func *cb, void *aux);
void unixctl_command_reply(struct unixctl_conn *, int code,
const char *body);
-
+
#ifdef __cplusplus
}
#endif
const char *program_name;
void
-out_of_memory(void)
+out_of_memory(void)
{
ovs_fatal(0, "virtual memory exhausted");
}
void *
-xcalloc(size_t count, size_t size)
+xcalloc(size_t count, size_t size)
{
void *p = count && size ? calloc(count, size) : malloc(1);
COVERAGE_INC(util_xalloc);
}
void *
-xmalloc(size_t size)
+xmalloc(size_t size)
{
void *p = malloc(size ? size : 1);
COVERAGE_INC(util_xalloc);
}
void *
-xrealloc(void *p, size_t size)
+xrealloc(void *p, size_t size)
{
p = realloc(p, size ? size : 1);
COVERAGE_INC(util_xalloc);
}
char *
-xstrdup(const char *s)
+xstrdup(const char *s)
{
return xmemdup0(s, strlen(s));
}
/* Print the version information for the program. */
void
-ovs_print_version(char *date, char *time,
+ovs_print_version(char *date, char *time,
uint8_t min_ofp, uint8_t max_ofp)
{
printf("%s (Open vSwitch) "VERSION BUILDNR"\n", program_name);
/* Pass a value to this function if it is marked with
- * __attribute__((warn_unused_result)) and you genuinely want to ignore
- * its return value. (Note that every scalar type can be implicitly
+ * __attribute__((warn_unused_result)) and you genuinely want to ignore
+ * its return value. (Note that every scalar type can be implicitly
* converted to bool.) */
void ignore(bool x OVS_UNUSED) { }
void set_program_name(const char *);
-void ovs_print_version(char *date, char *time,
+void ovs_print_version(char *date, char *time,
uint8_t min_ofp, uint8_t max_ofp);
#define OVS_PRINT_VERSION(min_ofp, max_ofp) \
ovs_print_version(__DATE__, __TIME__, (min_ofp), (max_ofp))
/* Really this should be implemented via callbacks into the vconn
* providers, but that seems too heavy-weight to bother with at the
* moment. */
-
+
printf("\n");
if (active) {
printf("Active OpenFlow connection methods:\n");
/* Returns the IP address of the peer, or 0 if the peer is not connected over
* an IP-based protocol or if its IP address is not yet known. */
uint32_t
-vconn_get_remote_ip(const struct vconn *vconn)
+vconn_get_remote_ip(const struct vconn *vconn)
{
return vconn->remote_ip;
}
-/* Returns the transport port of the peer, or 0 if the connection does not
+/* Returns the transport port of the peer, or 0 if the connection does not
* contain a port or if the port is not yet known. */
uint16_t
-vconn_get_remote_port(const struct vconn *vconn)
+vconn_get_remote_port(const struct vconn *vconn)
{
return vconn->remote_port;
}
-/* Returns the IP address used to connect to the peer, or 0 if the
- * connection is not an IP-based protocol or if its IP address is not
+/* Returns the IP address used to connect to the peer, or 0 if the
+ * connection is not an IP-based protocol or if its IP address is not
* yet known. */
uint32_t
-vconn_get_local_ip(const struct vconn *vconn)
+vconn_get_local_ip(const struct vconn *vconn)
{
return vconn->local_ip;
}
-/* Returns the transport port used to connect to the peer, or 0 if the
+/* Returns the transport port used to connect to the peer, or 0 if the
* connection does not contain a port or if the port is not yet known. */
uint16_t
-vconn_get_local_port(const struct vconn *vconn)
+vconn_get_local_port(const struct vconn *vconn)
{
return vconn->local_port;
}
static void
-vcs_connecting(struct vconn *vconn)
+vcs_connecting(struct vconn *vconn)
{
int retval = (vconn->class->connect)(vconn);
assert(retval != EINPROGRESS);
vconn->remote_port = port;
}
-void
+void
vconn_set_local_ip(struct vconn *vconn, uint32_t ip)
{
vconn->local_ip = ip;
}
-void
+void
vconn_set_local_port(struct vconn *vconn, uint16_t port)
{
vconn->local_port = port;
/* Searches the 'n_names' names in 'names'. Returns the index of a match for
* 'target', or 'n_names' if no name matches. */
static size_t
-search_name_array(const char *target, const char **names, size_t n_names)
+search_name_array(const char *target, const char **names, size_t n_names)
{
size_t i;
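/* Hedged sketch of the search described above (the loop itself falls outside
 * the diff context); whether the match ignores case, as strcasecmp() does
 * here, is an assumption. */
static size_t
search_name_array_sketch(const char *target, const char **names, size_t n_names)
{
    size_t i;

    for (i = 0; i < n_names; i++) {
        if (!strcasecmp(names[i], target)) {
            break;
        }
    }
    return i;                   /* Equals n_names when nothing matched. */
}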
/* Returns the logging level with the given 'name', or VLL_N_LEVELS if 'name'
* is not the name of a logging level. */
enum vlog_level
-vlog_get_level_val(const char *name)
+vlog_get_level_val(const char *name)
{
return search_name_array(name, level_names, ARRAY_SIZE(level_names));
}
/* Returns the name for logging facility 'facility'. */
const char *
-vlog_get_facility_name(enum vlog_facility facility)
+vlog_get_facility_name(enum vlog_facility facility)
{
assert(facility < VLF_N_FACILITIES);
return facilities[facility].name;
/* Returns the logging facility named 'name', or VLF_N_FACILITIES if 'name' is
* not the name of a logging facility. */
enum vlog_facility
-vlog_get_facility_val(const char *name)
+vlog_get_facility_val(const char *name)
{
size_t i;
/* Returns the current logging level for the given 'module' and 'facility'. */
enum vlog_level
-vlog_get_level(const struct vlog_module *module, enum vlog_facility facility)
+vlog_get_level(const struct vlog_module *module, enum vlog_facility facility)
{
assert(facility < VLF_N_FACILITIES);
return module->levels[facility];
* across all modules or facilities, respectively. */
void
vlog_set_levels(struct vlog_module *module, enum vlog_facility facility,
- enum vlog_level level)
+ enum vlog_level level)
{
assert(facility < VLF_N_FACILITIES || facility == VLF_ANY_FACILITY);
if (facility == VLF_ANY_FACILITY) {
}
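/* Hedged sketch of how the VLF_ANY_FACILITY case above is likely handled (the
 * body falls outside the diff context): fan out over every concrete facility.
 * 'set_one_level' is a hypothetical per-facility helper, and the analogous
 * "any module" fan-out is omitted. */
static void
vlog_set_levels_sketch(struct vlog_module *module, enum vlog_facility facility,
                       enum vlog_level level)
{
    if (facility == VLF_ANY_FACILITY) {
        for (facility = 0; facility < VLF_N_FACILITIES; facility++) {
            set_one_level(module, facility, level);
        }
    } else {
        set_one_level(module, facility, level);
    }
}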
static void
-do_set_pattern(enum vlog_facility facility, const char *pattern)
+do_set_pattern(enum vlog_facility facility, const char *pattern)
{
struct facility *f = &facilities[facility];
if (!f->default_pattern) {
/* Initializes the logging subsystem and registers its unixctl server
* commands. */
void
-vlog_init(void)
+vlog_init(void)
{
time_t now;
/* Closes the logging subsystem. */
void
-vlog_exit(void)
+vlog_exit(void)
{
if (vlog_inited) {
closelog();
}
void
-vlog_usage(void)
+vlog_usage(void)
{
printf("\nLogging options:\n"
" -v, --verbose=MODULE[:FACILITY[:LEVEL]] set logging levels\n"
#include <stdbool.h>
#include <time.h>
#include "util.h"
-
+
#ifdef __cplusplus
extern "C" {
#endif
if (d->dhcp) {
status_reply_put(sr, "state=%s", dhclient_get_state(d->dhcp));
status_reply_put(sr, "state-elapsed=%u",
- dhclient_get_state_elapsed(d->dhcp));
+ dhclient_get_state_elapsed(d->dhcp));
if (dhclient_is_bound(d->dhcp)) {
uint32_t ip = dhclient_get_ip(d->dhcp);
uint32_t netmask = dhclient_get_netmask(d->dhcp);
discovery_question_connectivity(struct discovery *d)
{
if (d->dhcp) {
- dhclient_force_renew(d->dhcp, 15);
+ dhclient_force_renew(d->dhcp, 15);
}
}
discovery_wait(struct discovery *d)
{
if (d->dhcp) {
- dhclient_wait(d->dhcp);
+ dhclient_wait(d->dhcp);
}
}
VLOG_DEFINE_THIS_MODULE(in_band)
/* In-band control allows a single network to be used for OpenFlow
- * traffic and other data traffic. Refer to ovs-vswitchd.conf(5) and
+ * traffic and other data traffic. Refer to ovs-vswitchd.conf(5) and
* secchan(8) for a description of configuring in-band control.
*
* This comment is an attempt to describe how in-band control works at a
* In-band also sets up the following rules for each unique next-hop MAC
* address for the remotes' IPs (the "next hop" is either the remote
* itself, if it is on a local subnet, or the gateway to reach the remote):
- *
+ *
* (d) ARP replies to the next hop's MAC address.
* (e) ARP requests from the next hop's MAC address.
*
* The goal of these rules is to be as narrow as possible to allow a
* switch to join a network and be able to communicate with the
* remotes. As mentioned earlier, these rules have higher priority
- * than the controller's rules, so if they are too broad, they may
+ * than the controller's rules, so if they are too broad, they may
* prevent the controller from implementing its policy. As such,
* in-band actively monitors some aspects of flow and packet processing
* so that the rules can be made more precise.
* match entries, so in-band control is able to be very precise about
* the flows it prevents. Flows that miss in the datapath are sent to
* userspace to be processed, so preventing these flows from being
- * cached in the "fast path" does not affect correctness. The only type
- * of flow that is currently prevented is one that would prevent DHCP
- * replies from being seen by the local port. For example, a rule that
- * forwarded all DHCP traffic to the controller would not be allowed,
+ * cached in the "fast path" does not affect correctness. The only type
+ * of flow that is currently prevented is one that would prevent DHCP
+ * replies from being seen by the local port. For example, a rule that
+ * forwarded all DHCP traffic to the controller would not be allowed,
* but one that forwarded to all ports (including the local port) would.
*
* As mentioned earlier, packets that miss in the datapath are sent to
* the userspace for processing. The userspace has its own flow table,
- * the "classifier", so in-band checks whether any special processing
- * is needed before the classifier is consulted. If a packet is a DHCP
- * response to a request from the local port, the packet is forwarded to
- * the local port, regardless of the flow table. Note that this requires
- * L7 processing of DHCP replies to determine whether the 'chaddr' field
+ * the "classifier", so in-band checks whether any special processing
+ * is needed before the classifier is consulted. If a packet is a DHCP
+ * response to a request from the local port, the packet is forwarded to
+ * the local port, regardless of the flow table. Note that this requires
+ * L7 processing of DHCP replies to determine whether the 'chaddr' field
* matches the MAC address of the local port.
*
* It is interesting to note that for an L3-based in-band control
- * mechanism, the majority of rules are devoted to ARP traffic. At first
- * glance, some of these rules appear redundant. However, each serves an
- * important role. First, in order to determine the MAC address of the
- * remote side (controller or gateway) for other ARP rules, we must allow
- * ARP traffic for our local port with rules (b) and (c). If we are
- * between a switch and its connection to the remote, we have to
- * allow the other switch's ARP traffic to through. This is done with
+ * mechanism, the majority of rules are devoted to ARP traffic. At first
+ * glance, some of these rules appear redundant. However, each serves an
+ * important role. First, in order to determine the MAC address of the
+ * remote side (controller or gateway) for other ARP rules, we must allow
+ * ARP traffic for our local port with rules (b) and (c). If we are
+ * between a switch and its connection to the remote, we have to
+ * allow the other switch's ARP traffic through. This is done with
* rules (d) and (e), since we do not know the addresses of the other
- * switches a priori, but do know the remote's or gateway's. Finally,
- * if the remote is running in a local guest VM that is not reached
- * through the local port, the switch that is connected to the VM must
- * allow ARP traffic based on the remote's IP address, since it will
- * not know the MAC address of the local port that is sending the traffic
+ * switches a priori, but do know the remote's or gateway's. Finally,
+ * if the remote is running in a local guest VM that is not reached
+ * through the local port, the switch that is connected to the VM must
+ * allow ARP traffic based on the remote's IP address, since it will
+ * not know the MAC address of the local port that is sending the traffic
* or the MAC address of the remote in the guest VM.
*
* With a few notable exceptions below, in-band should work in most
 * network setups. The following are considered "supported" in the
- * current implementation:
+ * current implementation:
*
* - Locally Connected. The switch and remote are on the same
* subnet. This uses rules (a), (b), (c), (h), and (i).
* "Between Switch and Remote" configuration described earlier.
*
* - Remote on Local VM. The remote is a guest VM on the
- * system running in-band control. This uses rules (a), (b), (c),
+ * system running in-band control. This uses rules (a), (b), (c),
* (h), and (i).
*
* - Remote on Local VM with Different Networks. The remote
* IP address has not been configured for that port on the switch.
* As such, the switch will use eth0 to connect to the remote,
* and eth1's rules about the local port will not work. In the
- * example, the switch attached to eth0 would use rules (a), (b),
- * (c), (h), and (i) on eth0. The switch attached to eth1 would use
+ * example, the switch attached to eth0 would use rules (a), (b),
+ * (c), (h), and (i) on eth0. The switch attached to eth1 would use
* rules (f), (g), (h), and (i).
*
* The following are explicitly *not* supported by in-band control:
*
- * - Specify Remote by Name. Currently, the remote must be
+ * - Specify Remote by Name. Currently, the remote must be
* identified by IP address. A naive approach would be to permit
* all DNS traffic. Unfortunately, this would prevent the
* controller from defining any policy over DNS. Since switches
- * that are located behind us need to connect to the remote,
+ * that are located behind us need to connect to the remote,
* in-band cannot simply add a rule that allows DNS traffic from
* the local port. The "correct" way to support this is to parse
* DNS requests to allow all traffic related to a request for the
* the time-being.
*
* - Differing Remotes for Switches. All switches must know
- * the L3 addresses for all the remotes that other switches
+ * the L3 addresses for all the remotes that other switches
* may use, since rules need to be set up to allow traffic related
* to those remotes through. See rules (f), (g), (h), and (i).
*
- * - Differing Routes for Switches. In order for the switch to
- * allow other switches to connect to a remote through a
+ * - Differing Routes for Switches. In order for the switch to
+ * allow other switches to connect to a remote through a
* gateway, it allows the gateway's traffic through with rules (d)
* and (e). If the routes to the remote differ for the two
- * switches, we will not know the MAC address of the alternate
+ * switches, we will not know the MAC address of the alternate
* gateway.
*/
}
/* Returns true if 'packet' should be sent to the local port regardless
- * of the flow table. */
+ * of the flow table. */
bool
-in_band_msg_in_hook(struct in_band *in_band, const flow_t *flow,
+in_band_msg_in_hook(struct in_band *in_band, const flow_t *flow,
const struct ofpbuf *packet)
{
if (!in_band) {
return false;
}
-/* Returns true if the rule that would match 'flow' with 'actions' is
+/* Returns true if the rule that would match 'flow' with 'actions' is
* allowed to be set up in the datapath. */
bool
in_band_rule_check(struct in_band *in_band, const flow_t *flow,
* by the local port. */
if (flow->dl_type == htons(ETH_TYPE_IP)
&& flow->nw_proto == IP_TYPE_UDP
- && flow->tp_src == htons(DHCP_SERVER_PORT)
+ && flow->tp_src == htons(DHCP_SERVER_PORT)
&& flow->tp_dst == htons(DHCP_CLIENT_PORT)) {
int i;
for (i=0; i<actions->n_actions; i++) {
- if (actions->actions[i].output.type == ODPAT_OUTPUT
+ if (actions->actions[i].output.type == ODPAT_OUTPUT
&& actions->actions[i].output.port == ODPP_LOCAL) {
return true;
- }
+ }
}
return false;
}
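/* Hedged usage sketch (a hypothetical caller in the flow-setup path): consult
 * the check above before letting a flow be cached in the datapath.  The names
 * here are illustrative, not the actual ofproto code. */
static bool
may_install_flow(struct in_band *in_band, const flow_t *flow,
                 const struct odp_actions *actions)
{
    return !in_band || in_band_rule_check(in_band, flow, actions);
}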
void in_band_run(struct in_band *);
void in_band_wait(struct in_band *);
-bool in_band_msg_in_hook(struct in_band *, const flow_t *,
+bool in_band_msg_in_hook(struct in_band *, const flow_t *,
const struct ofpbuf *packet);
bool in_band_rule_check(struct in_band *, const flow_t *,
const struct odp_actions *);
uint8_t engine_id; /* Value of engine_id to use. */
long long int boot_time; /* Time when netflow_create() was called. */
struct collectors *collectors; /* NetFlow collectors. */
- bool add_id_to_iface; /* Put the 7 least signficiant bits of
- * 'engine_id' into the most signficant
+ bool add_id_to_iface; /* Put the 7 least significant bits of
+ * 'engine_id' into the most significant
* bits of the interface fields. */
uint32_t netflow_cnt; /* Flow sequence number for NetFlow. */
struct ofpbuf packet; /* NetFlow packet being accumulated. */
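/* Hedged sketch of what add_id_to_iface implies for NetFlow v5's 16-bit
 * interface-index fields: 7 bits of engine ID above the port number.  The
 * 9-bit port mask is an assumption for illustration. */
static uint16_t
netflow_iface_id_sketch(uint8_t engine_id, uint16_t port_no)
{
    return ((uint16_t) (engine_id & 0x7f) << 9) | (port_no & 0x1ff);
}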
struct rule {
struct cls_rule cr;
- uint64_t flow_cookie; /* Controller-issued identifier.
+ uint64_t flow_cookie; /* Controller-issued identifier.
(Kept in network-byte order.) */
uint16_t idle_timeout; /* In seconds from time of last use. */
uint16_t hard_timeout; /* In seconds from time of creation. */
{
struct rule *rule;
rule = rule_create(p, NULL, actions, n_actions,
- idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
+ idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
0, 0, false);
cls_rule_from_flow(flow, wildcards, priority, &rule->cr);
rule_insert(p, rule, NULL, 0);
}
static void
-append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn,
+append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn,
struct ofpbuf **msgp)
{
struct netdev_stats stats;
struct ofp_port_stats *ops;
- /* Intentionally ignore return value, since errors will set
- * 'stats' to all-1s, which is correct for OpenFlow, and
+ /* Intentionally ignore return value, since errors will set
+ * 'stats' to all-1s, which is correct for OpenFlow, and
* netdev_get_stats() will log errors. */
netdev_get_stats(port->netdev, &stats);
msg = start_stats_reply(osr, sizeof *ops * 16);
if (psr->port_no != htons(OFPP_NONE)) {
- port = port_array_get(&p->ports,
+ port = port_array_get(&p->ports,
ofp_port_to_odp_port(ntohs(psr->port_no)));
if (port) {
append_port_stat(port, ntohs(psr->port_no), ofconn, &msg);
ds_put_cstr(results, "\n");
}
-/* Adds a pretty-printed description of all flows to 'results', including
+/* Adds a pretty-printed description of all flows to 'results', including
* those marked hidden by secchan (e.g., by in-band control). */
void
ofproto_get_all_flows(struct ofproto *p, struct ds *results)
status_reply_put(sr, "name=%s", rconn_get_target(rconn));
if (remote_ip) {
status_reply_put(sr, "remote-ip="IP_FMT, IP_ARGS(&remote_ip));
- status_reply_put(sr, "remote-port=%d",
+ status_reply_put(sr, "remote-port=%d",
ntohs(rconn_get_remote_port(rconn)));
status_reply_put(sr, "local-ip="IP_FMT, IP_ARGS(&local_ip));
- status_reply_put(sr, "local-port=%d",
+ status_reply_put(sr, "local-port=%d",
ntohs(rconn_get_local_port(rconn)));
}
status_reply_put(sr, "state=%s", rconn_get_state(rconn));
* elements in order into 'list'. */
static void
make_list(struct list *list, struct element elements[],
- int values[], size_t n)
+ int values[], size_t n)
{
size_t i;
-
+
list_init(list);
for (i = 0; i < n; i++) {
elements[i].value = i;
/* Verifies that 'list' contains exactly the 'n' values in 'values', in the
* specified order. */
static void
-check_list(struct list *list, const int values[], size_t n)
+check_list(struct list *list, const int values[], size_t n)
{
struct element *e;
size_t i;
-
+
i = 0;
LIST_FOR_EACH (e, struct element, node, list) {
assert(i < n);
#if 0
/* Prints the values in 'list', plus 'name' as a title. */
static void
-print_list(const char *name, struct list *list)
+print_list(const char *name, struct list *list)
{
struct element *e;
-
+
printf("%s:", name);
LIST_FOR_EACH (e, struct element, node, list) {
printf(" %d", e->value);
/* Tests basic list construction. */
static void
-test_list_construction(void)
+test_list_construction(void)
{
enum { MAX_ELEMS = 100 };
size_t n;
struct element elements[MAX_ELEMS];
int values[MAX_ELEMS];
struct list list;
-
+
make_list(&list, elements, values, n);
check_list(&list, values, n);
}
/* Tests that LIST_FOR_EACH_SAFE properly allows for deletion of the current
* element of a list. */
static void
-test_list_for_each_safe(void)
+test_list_for_each_safe(void)
{
enum { MAX_ELEMS = 10 };
size_t n;
struct element *e, *next;
size_t values_idx, n_remaining;
int i;
-
+
make_list(&list, elements, values, n);
i = 0;
}
static void
-run_test(void (*function)(void))
+run_test(void (*function)(void))
{
function();
printf(".");
}
int
-main(void)
+main(void)
{
run_test(test_list_construction);
run_test(test_list_for_each_safe);
MUST_SUCCEED(TYPE_MINIMUM(type) == minimum);
int
-main (void)
+main (void)
{
TEST_TYPE(char, CHAR_MIN, CHAR_MAX, (CHAR_MIN < 0));
/* -q, --queue: OpenFlow queue to use, or the default queue if UINT32_MAX. */
static uint32_t queue_id = UINT32_MAX;
-/* --with-flows: File with flows to send to switch, or null to not load
+/* --with-flows: File with flows to send to switch, or null to not load
* any default flows. */
static FILE *flow_file = NULL;
int n_ports;
int port_idx;
unsigned int port_no;
-
+
/* Check if the argument is a port index. Otherwise, treat it as
* the port name. */
/* Parse and send. */
ofm = make_openflow(sizeof *ofm, OFPT_FLOW_MOD, &buffer);
- parse_ofp_str(argc > 2 ? argv[2] : "", &ofm->match, NULL, NULL,
+ parse_ofp_str(argc > 2 ? argv[2] : "", &ofm->match, NULL, NULL,
&out_port, &priority, NULL, NULL, NULL);
if (strict) {
ofm->command = htons(OFPFC_DELETE_STRICT);
int n_ports;
int port_idx;
int port_no;
-
+
/* Check if the argument is a port index. Otherwise, treat it as
* the port name. */
port_no = -1;
}
- /* Send a "Features Request" to get the information we need in order
+ /* Send a "Features Request" to get the information we need in order
* to modify the port. */
make_openflow(sizeof(struct ofp_header), OFPT_FEATURES_REQUEST, &request);
open_vconn(argv[1], &vconn);
}
} else {
/* Check argument as an interface name */
- if (!strncmp((char *)osf->ports[port_idx].name, argv[2],
+ if (!strncmp((char *)osf->ports[port_idx].name, argv[2],
sizeof osf->ports[0].name)) {
break;
}
if (!strncasecmp(argv[3], MOD_PORT_CMD_UP, sizeof MOD_PORT_CMD_UP)) {
opm->mask |= htonl(OFPPC_PORT_DOWN);
- } else if (!strncasecmp(argv[3], MOD_PORT_CMD_DOWN,
+ } else if (!strncasecmp(argv[3], MOD_PORT_CMD_DOWN,
sizeof MOD_PORT_CMD_DOWN)) {
opm->mask |= htonl(OFPPC_PORT_DOWN);
opm->config |= htonl(OFPPC_PORT_DOWN);
- } else if (!strncasecmp(argv[3], MOD_PORT_CMD_FLOOD,
+ } else if (!strncasecmp(argv[3], MOD_PORT_CMD_FLOOD,
sizeof MOD_PORT_CMD_FLOOD)) {
opm->mask |= htonl(OFPPC_NO_FLOOD);
- } else if (!strncasecmp(argv[3], MOD_PORT_CMD_NOFLOOD,
+ } else if (!strncasecmp(argv[3], MOD_PORT_CMD_NOFLOOD,
sizeof MOD_PORT_CMD_NOFLOOD)) {
opm->mask |= htonl(OFPPC_NO_FLOOD);
opm->config |= htonl(OFPPC_NO_FLOOD);
static void mirror_reconfigure_one(struct mirror *, struct ovsrec_mirror *);
static bool vlan_is_mirrored(const struct mirror *, int vlan);
-static struct iface *iface_create(struct port *port,
+static struct iface *iface_create(struct port *port,
const struct ovsrec_interface *if_cfg);
static void iface_destroy(struct iface *);
static struct iface *iface_lookup(const struct bridge *, const char *name);
opts.collectors.n = nf_cfg->n_targets;
opts.collectors.names = nf_cfg->targets;
if (ofproto_set_netflow(br->ofproto, &opts)) {
- VLOG_ERR("bridge %s: problem setting netflow collectors",
+ VLOG_ERR("bridge %s: problem setting netflow collectors",
br->name);
}
} else {
{
struct bridge *br;
struct ds results;
-
+
br = bridge_lookup(args);
if (!br) {
unixctl_command_reply(conn, 501, "Unknown bridge");
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
- "interface %"PRIu16, br->name, flow->in_port);
+ "interface %"PRIu16, br->name, flow->in_port);
}
*in_portp = NULL;
/* We need to make the same determination as the Linux bonding
 * code to determine whether a slave should be considered "up".
- * The Linux function bond_miimon_inspect() supports four
+ * The Linux function bond_miimon_inspect() supports four
* BOND_LINK_* states:
- *
+ *
* - BOND_LINK_UP: carrier detected, updelay has passed.
* - BOND_LINK_FAIL: carrier lost, downdelay in progress.
* - BOND_LINK_DOWN: carrier lost, downdelay has passed.
* - BOND_LINK_BACK: carrier detected, updelay in progress.
*
- * The function bond_info_show_slave() only considers BOND_LINK_UP
+ * The function bond_info_show_slave() only considers BOND_LINK_UP
* to be "up" and anything else to be "down".
*/
slave->up = iface->enabled && iface->delay_expires == LLONG_MAX;
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 60);
-/* Maximum number of milliseconds to wait before pruning port entries that
+/* Maximum number of milliseconds to wait before pruning port entries that
* no longer exist. If set to zero, ports are never pruned. */
static int prune_timeout = 5000;
}
static void
-do_get_bridge_parts(const struct ovsrec_bridge *br, struct svec *parts,
+do_get_bridge_parts(const struct ovsrec_bridge *br, struct svec *parts,
int vlan, bool break_down_bonds)
{
struct svec ports;
* reported. If 'vlan' > 0, only interfaces with implicit VLAN 'vlan' are
* reported. */
static void
-get_bridge_ifaces(const struct ovsrec_bridge *br, struct svec *ifaces,
+get_bridge_ifaces(const struct ovsrec_bridge *br, struct svec *ifaces,
int vlan)
{
do_get_bridge_parts(br, ifaces, vlan, true);
* only trunk ports or ports with implicit VLAN 0 are reported. If 'vlan' > 0,
* only port with implicit VLAN 'vlan' are reported. */
static void
-get_bridge_ports(const struct ovsrec_bridge *br, struct svec *ports,
+get_bridge_ports(const struct ovsrec_bridge *br, struct svec *ports,
int vlan)
{
do_get_bridge_parts(br, ports, vlan, false);
struct ovsrec_bridge *bridge)
{
struct ovsrec_bridge **bridges;
- size_t i;
+ size_t i;
bridges = xmalloc(sizeof *ovs->bridges * (ovs->n_bridges + 1));
for (i = 0; i < ovs->n_bridges; i++) {
bridges[ovs->n_bridges] = bridge;
ovsrec_open_vswitch_set_bridges(ovs, bridges, ovs->n_bridges + 1);
free(bridges);
-}
+}
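/* Hedged reconstruction of the append above, including the copy-loop body the
 * diff context omits: duplicate the old pointer array, add the new bridge at
 * the end, hand the enlarged array to the IDL setter, and free the temporary. */
static void
ovs_append_bridge_sketch(const struct ovsrec_open_vswitch *ovs,
                         struct ovsrec_bridge *bridge)
{
    struct ovsrec_bridge **bridges;
    size_t i;

    bridges = xmalloc(sizeof *ovs->bridges * (ovs->n_bridges + 1));
    for (i = 0; i < ovs->n_bridges; i++) {
        bridges[i] = ovs->bridges[i];
    }
    bridges[ovs->n_bridges] = bridge;
    ovsrec_open_vswitch_set_bridges(ovs, bridges, ovs->n_bridges + 1);
    free(bridges);
}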
static struct json *
where_uuid_equals(const struct uuid *uuid)
port = ovsrec_port_insert(txn_from_openvswitch(ovs));
ovsrec_port_set_name(port, br_name);
ovsrec_port_set_interfaces(port, &iface, 1);
-
+
br = ovsrec_bridge_insert(txn_from_openvswitch(ovs));
ovsrec_bridge_set_name(br, br_name);
ovsrec_bridge_set_ports(br, &port, 1);
-
+
ovs_insert_bridge(ovs, br);
return commit_txn(txn, true);
}
static void
-add_port(const struct ovsrec_open_vswitch *ovs,
+add_port(const struct ovsrec_open_vswitch *ovs,
const struct ovsrec_bridge *br, const char *port_name)
{
struct ovsrec_interface *iface;
static int
handle_bridge_cmd(struct ovsdb_idl *idl,
- const struct ovsrec_open_vswitch *ovs,
+ const struct ovsrec_open_vswitch *ovs,
struct ofpbuf *buffer, bool add)
{
const char *br_name;
}
/* Figure out vswitchd bridge and VLAN. */
- error = linux_bridge_to_ovs_bridge(ovs, linux_name,
+ error = linux_bridge_to_ovs_bridge(ovs, linux_name,
&ovs_bridge, &br_vlan);
if (error) {
send_simple_reply(seq, error);
return error;
}
- error = linux_bridge_to_ovs_bridge(ovs, linux_name,
+ error = linux_bridge_to_ovs_bridge(ovs, linux_name,
&ovs_bridge, &br_vlan);
if (error) {
send_simple_reply(seq, error);
} else if (error == ENOBUFS) {
VLOG_WARN_RL(&rl, "network monitor socket overflowed");
} else if (error) {
- VLOG_WARN_RL(&rl, "error on network monitor socket: %s",
+ VLOG_WARN_RL(&rl, "error on network monitor socket: %s",
strerror(error));
} else {
struct nlattr *attrs[ARRAY_SIZE(rtnlgrp_link_policy)];
VLOG_WARN_RL(&rl, "received bad rtnl message (no ifinfomsg)");
ofpbuf_delete(buf);
return;
- }
-
+ }
+
if (!nl_policy_parse(buf, NLMSG_HDRLEN + sizeof(struct ifinfomsg),
rtnlgrp_link_policy,
attrs, ARRAY_SIZE(rtnlgrp_link_policy))) {
br = find_bridge(ovs, br_name);
if (!br) {
- VLOG_WARN("no bridge named %s from which to remove %s",
+ VLOG_WARN("no bridge named %s from which to remove %s",
br_name, port_name);
ofpbuf_delete(buf);
return;
netdev_run();
/* If 'prune_timeout' is non-zero, we actively prune from the
- * configuration of port entries that are no longer valid. We
- * use two methods:
+ * configuration of port entries that are no longer valid. We
+ * use two methods:
*
* 1) The kernel explicitly notifies us of removed ports
* through the RTNL messages.