+ return error;
+}
+
+/* Shell command templates used by netdev_linux_set_policing() to configure
+ * ingress policing by invoking /sbin/tc.  %s is the netdev name; the %d
+ * arguments in POLICE_CONFIG_CMD are the rate and burst in kbits. */
+#define POLICE_ADD_CMD "/sbin/tc qdisc add dev %s handle ffff: ingress"
+#define POLICE_CONFIG_CMD "/sbin/tc filter add dev %s parent ffff: protocol ip prio 50 u32 match ip src 0.0.0.0/0 police rate %dkbit burst %dk mtu 65535 drop flowid :1"
+
+/* Remove ingress policing from 'netdev'.  Returns 0 if successful, otherwise
+ * a positive errno value.
+ *
+ * This function is equivalent to running
+ *     /sbin/tc qdisc del dev %s handle ffff: ingress
+ * but it is much, much faster.
+ */
+static int
+netdev_linux_remove_policing(struct netdev *netdev)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    const char *netdev_name = netdev_get_name(netdev);
+
+    struct ofpbuf request;
+    struct tcmsg *tcmsg;
+    int error;
+
+    tcmsg = tc_make_request(netdev, RTM_DELQDISC, 0, &request);
+    if (!tcmsg) {
+        return ENODEV;
+    }
+    /* Handle ffff: is the fixed handle that POLICE_ADD_CMD assigns to the
+     * ingress qdisc, so deleting it removes any policing we installed. */
+    tcmsg->tcm_handle = tc_make_handle(0xffff, 0);
+    tcmsg->tcm_parent = TC_H_INGRESS;
+    nl_msg_put_string(&request, TCA_KIND, "ingress");
+    nl_msg_put_unspec(&request, TCA_OPTIONS, NULL, 0);
+
+    error = tc_transact(&request, NULL);
+    if (error && error != ENOENT && error != EINVAL) {
+        /* ENOENT and EINVAL mean there was no ingress qdisc to delete, which
+         * is fine; anything else is a real failure. */
+        VLOG_WARN_RL(&rl, "%s: removing policing failed: %s",
+                     netdev_name, strerror(error));
+        return error;
+    }
+
+    /* Record in the cache that no policing is configured. */
+    netdev_dev->kbits_rate = 0;
+    netdev_dev->kbits_burst = 0;
+    netdev_dev->cache_valid |= VALID_POLICING;
+    return 0;
+}
+
+/* Attempts to set input rate limiting (policing) policy: 'kbits_rate' kbits/s
+ * with a burst of 'kbits_burst' kbits.  A 'kbits_rate' of 0 removes policing.
+ *
+ * Returns 0 if successful, otherwise a positive errno value, matching the
+ * convention used by the rest of this file.  (The previous version returned
+ * -1 on failure, which callers expecting a positive errno would mishandle.) */
+static int
+netdev_linux_set_policing(struct netdev *netdev,
+                          uint32_t kbits_rate, uint32_t kbits_burst)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    const char *netdev_name = netdev_get_name(netdev);
+    char command[1024];
+
+    COVERAGE_INC(netdev_set_policing);
+
+    kbits_burst = (!kbits_rate ? 0       /* Force to 0 if no rate specified. */
+                   : !kbits_burst ? 1000 /* Default to 1000 kbits if 0. */
+                   : kbits_burst);       /* Stick with user-specified value. */
+
+    if (netdev_dev->cache_valid & VALID_POLICING
+        && netdev_dev->kbits_rate == kbits_rate
+        && netdev_dev->kbits_burst == kbits_burst) {
+        /* Assume that settings haven't changed since we last set them. */
+        return 0;
+    }
+
+    netdev_linux_remove_policing(netdev);
+    if (kbits_rate) {
+        /* XXX Forking /sbin/tc via system() is slow compared to talking
+         * rtnetlink directly (see netdev_linux_remove_policing()). */
+        snprintf(command, sizeof(command), POLICE_ADD_CMD, netdev_name);
+        if (system(command) != 0) {
+            VLOG_WARN_RL(&rl, "%s: problem adding policing", netdev_name);
+            return EPROTO;      /* system() yields no specific errno. */
+        }
+
+        snprintf(command, sizeof(command), POLICE_CONFIG_CMD, netdev_name,
+                 kbits_rate, kbits_burst);
+        if (system(command) != 0) {
+            VLOG_WARN_RL(&rl, "%s: problem configuring policing",
+                         netdev_name);
+            return EPROTO;      /* system() yields no specific errno. */
+        }
+
+        netdev_dev->kbits_rate = kbits_rate;
+        netdev_dev->kbits_burst = kbits_burst;
+        netdev_dev->cache_valid |= VALID_POLICING;
+    }
+
+    return 0;
+}
+
+/* Adds to 'types' the OVS name of each QoS implementation that can be
+ * installed on a Linux netdev, i.e. each tc_ops with a tc_install function
+ * and a nonempty ovs_name.  Always returns 0. */
+static int
+netdev_linux_get_qos_types(const struct netdev *netdev OVS_UNUSED,
+                           struct sset *types)
+{
+    const struct tc_ops **opsp;
+
+    for (opsp = tcs; *opsp != NULL; opsp++) {
+        const struct tc_ops *ops = *opsp;
+        if (ops->tc_install && ops->ovs_name[0] != '\0') {
+            sset_add(types, ops->ovs_name);
+        }
+    }
+    return 0;
+}
+
+/* Returns the tc_ops whose OVS name is exactly 'name', or NULL if there is
+ * none. */
+static const struct tc_ops *
+tc_lookup_ovs_name(const char *name)
+{
+    const struct tc_ops **opsp;
+
+    for (opsp = tcs; *opsp != NULL; opsp++) {
+        const struct tc_ops *ops = *opsp;
+        if (!strcmp(name, ops->ovs_name)) {
+            return ops;
+        }
+    }
+    return NULL;
+}
+
+/* Returns the tc_ops whose Linux qdisc name is exactly 'name', or NULL if
+ * there is none.  Skips tc_ops with a null linux_name (pseudo-qdiscs). */
+static const struct tc_ops *
+tc_lookup_linux_name(const char *name)
+{
+    const struct tc_ops **opsp;
+
+    for (opsp = tcs; *opsp != NULL; opsp++) {
+        const struct tc_ops *ops = *opsp;
+        if (ops->linux_name && !strcmp(name, ops->linux_name)) {
+            return ops;
+        }
+    }
+    return NULL;
+}
+
+/* Looks up queue 'queue_id' in 'netdev''s cached tc queue hmap, given the
+ * precomputed 'hash' of 'queue_id'.  Returns the queue or NULL if not found.
+ * Assumes 'netdev' has a tc instance (netdev_dev->tc is non-null). */
+static struct tc_queue *
+tc_find_queue__(const struct netdev *netdev, unsigned int queue_id,
+                size_t hash)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    struct tc_queue *queue;
+
+    HMAP_FOR_EACH_IN_BUCKET (queue, hmap_node, hash, &netdev_dev->tc->queues) {
+        if (queue->queue_id == queue_id) {
+            return queue;
+        }
+    }
+    return NULL;
+}
+
+/* Convenience wrapper for tc_find_queue__() that computes the hash itself. */
+static struct tc_queue *
+tc_find_queue(const struct netdev *netdev, unsigned int queue_id)
+{
+    return tc_find_queue__(netdev, queue_id, hash_int(queue_id, 0));
+}
+
+/* Stores in '*caps' the number of queues supported by QoS type 'type'.
+ * Returns 0 on success, EOPNOTSUPP if 'type' is not a known OVS QoS name. */
+static int
+netdev_linux_get_qos_capabilities(const struct netdev *netdev OVS_UNUSED,
+                                  const char *type,
+                                  struct netdev_qos_capabilities *caps)
+{
+    const struct tc_ops *ops = tc_lookup_ovs_name(type);
+    if (!ops) {
+        return EOPNOTSUPP;
+    }
+    caps->n_queues = ops->n_queues;
+    return 0;
+}
+
+/* Queries the qdisc currently installed on 'netdev', storing its OVS QoS
+ * type name in '*typep' and its configuration in 'details' (if the type
+ * implements qdisc_get).  Returns 0 on success, otherwise a positive errno
+ * value. */
+static int
+netdev_linux_get_qos(const struct netdev *netdev,
+                     const char **typep, struct shash *details)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    int error;
+
+    /* Make sure netdev_dev->tc reflects the kernel's actual qdisc. */
+    error = tc_query_qdisc(netdev);
+    if (error) {
+        return error;
+    }
+
+    *typep = netdev_dev->tc->ops->ovs_name;
+    return (netdev_dev->tc->ops->qdisc_get
+            ? netdev_dev->tc->ops->qdisc_get(netdev, details)
+            : 0);
+}
+
+/* Installs QoS type 'type' with configuration 'details' on 'netdev'.  If the
+ * same type is already installed, just reconfigures it; otherwise deletes the
+ * existing qdisc and installs a new one.  Returns 0 on success, EOPNOTSUPP
+ * for an unknown or uninstallable 'type', otherwise a positive errno value. */
+static int
+netdev_linux_set_qos(struct netdev *netdev,
+                     const char *type, const struct shash *details)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    const struct tc_ops *new_ops;
+    int error;
+
+    new_ops = tc_lookup_ovs_name(type);
+    if (!new_ops || !new_ops->tc_install) {
+        return EOPNOTSUPP;
+    }
+
+    error = tc_query_qdisc(netdev);
+    if (error) {
+        return error;
+    }
+
+    if (new_ops == netdev_dev->tc->ops) {
+        /* Same QoS type: reconfigure in place if the type supports it. */
+        return new_ops->qdisc_set ? new_ops->qdisc_set(netdev, details) : 0;
+    } else {
+        /* Delete existing qdisc. */
+        error = tc_del_qdisc(netdev);
+        if (error) {
+            return error;
+        }
+        assert(netdev_dev->tc == NULL);
+
+        /* Install new qdisc. */
+        error = new_ops->tc_install(netdev, details);
+        assert((error == 0) == (netdev_dev->tc != NULL));
+
+        return error;
+    }
+}
+
+/* Stores in 'details' the configuration of queue 'queue_id' on 'netdev'.
+ * Returns 0 on success, ENOENT if the queue does not exist, otherwise a
+ * positive errno value. */
+static int
+netdev_linux_get_queue(const struct netdev *netdev,
+                       unsigned int queue_id, struct shash *details)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    int error;
+
+    error = tc_query_qdisc(netdev);
+    if (error) {
+        return error;
+    } else {
+        struct tc_queue *queue = tc_find_queue(netdev, queue_id);
+        return (queue
+                ? netdev_dev->tc->ops->class_get(netdev, queue, details)
+                : ENOENT);
+    }
+}
+
+/* Configures queue 'queue_id' on 'netdev' from 'details'.  Returns 0 on
+ * success, EINVAL if 'queue_id' is out of range or the installed QoS type
+ * does not support per-queue configuration, otherwise a positive errno
+ * value. */
+static int
+netdev_linux_set_queue(struct netdev *netdev,
+                       unsigned int queue_id, const struct shash *details)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    int error;
+
+    error = tc_query_qdisc(netdev);
+    if (error) {
+        return error;
+    } else if (queue_id >= netdev_dev->tc->ops->n_queues
+               || !netdev_dev->tc->ops->class_set) {
+        return EINVAL;
+    }
+
+    return netdev_dev->tc->ops->class_set(netdev, queue_id, details);
+}
+
+/* Deletes queue 'queue_id' from 'netdev'.  Returns 0 on success, EINVAL if
+ * the installed QoS type does not support deletion, ENOENT if the queue does
+ * not exist, otherwise a positive errno value. */
+static int
+netdev_linux_delete_queue(struct netdev *netdev, unsigned int queue_id)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    int error;
+
+    error = tc_query_qdisc(netdev);
+    if (error) {
+        return error;
+    } else if (!netdev_dev->tc->ops->class_delete) {
+        return EINVAL;
+    } else {
+        struct tc_queue *queue = tc_find_queue(netdev, queue_id);
+        return (queue
+                ? netdev_dev->tc->ops->class_delete(netdev, queue)
+                : ENOENT);
+    }
+}
+
+/* Retrieves statistics for queue 'queue_id' on 'netdev' into '*stats'.
+ * Returns 0 on success, EOPNOTSUPP if the installed QoS type does not report
+ * per-queue statistics, ENOENT if the queue does not exist, otherwise a
+ * positive errno value. */
+static int
+netdev_linux_get_queue_stats(const struct netdev *netdev,
+                             unsigned int queue_id,
+                             struct netdev_queue_stats *stats)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    int error;
+
+    error = tc_query_qdisc(netdev);
+    if (error) {
+        return error;
+    } else if (!netdev_dev->tc->ops->class_get_stats) {
+        return EOPNOTSUPP;
+    } else {
+        const struct tc_queue *queue = tc_find_queue(netdev, queue_id);
+        return (queue
+                ? netdev_dev->tc->ops->class_get_stats(netdev, queue, stats)
+                : ENOENT);
+    }
+}
+
+/* Starts a Netlink dump (RTM_GETTCLASS) of all tc classes on 'netdev' into
+ * 'dump'.  Returns true on success, false if the request could not be built
+ * (e.g. the device no longer exists). */
+static bool
+start_queue_dump(const struct netdev *netdev, struct nl_dump *dump)
+{
+    struct ofpbuf request;
+    struct tcmsg *tcmsg;
+
+    tcmsg = tc_make_request(netdev, RTM_GETTCLASS, 0, &request);
+    if (!tcmsg) {
+        return false;
+    }
+    tcmsg->tcm_parent = 0;
+    nl_dump_start(dump, rtnl_sock, &request);
+    ofpbuf_uninit(&request);
+    return true;
+}
+
+/* Invokes 'cb' once per queue on 'netdev', passing the queue id, its
+ * configuration, and 'aux'.  Continues past per-queue errors; returns the
+ * last error seen (0 if every queue was reported successfully), EOPNOTSUPP
+ * if the installed QoS type cannot report queue configuration, or the error
+ * from querying the qdisc. */
+static int
+netdev_linux_dump_queues(const struct netdev *netdev,
+                         netdev_dump_queues_cb *cb, void *aux)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    struct tc_queue *queue;
+    struct shash details;
+    int last_error;
+    int error;
+
+    error = tc_query_qdisc(netdev);
+    if (error) {
+        return error;
+    } else if (!netdev_dev->tc->ops->class_get) {
+        return EOPNOTSUPP;
+    }
+
+    last_error = 0;
+    shash_init(&details);
+    HMAP_FOR_EACH (queue, hmap_node, &netdev_dev->tc->queues) {
+        /* Reuse 'details' across iterations; clear leftover entries. */
+        shash_clear(&details);
+
+        error = netdev_dev->tc->ops->class_get(netdev, queue, &details);
+        if (!error) {
+            (*cb)(queue->queue_id, &details, aux);
+        } else {
+            last_error = error;
+        }
+    }
+    shash_destroy(&details);
+
+    return last_error;
+}
+
+/* Invokes 'cb' with statistics for each queue on 'netdev', by dumping tc
+ * classes from the kernel and letting the QoS type's class_dump_stats parse
+ * each message.  Returns 0 on success; otherwise the dump error or the last
+ * per-class error, or EOPNOTSUPP/ENODEV as appropriate. */
+static int
+netdev_linux_dump_queue_stats(const struct netdev *netdev,
+                              netdev_dump_queue_stats_cb *cb, void *aux)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    struct nl_dump dump;
+    struct ofpbuf msg;
+    int last_error;
+    int error;
+
+    error = tc_query_qdisc(netdev);
+    if (error) {
+        return error;
+    } else if (!netdev_dev->tc->ops->class_dump_stats) {
+        return EOPNOTSUPP;
+    }
+
+    last_error = 0;
+    if (!start_queue_dump(netdev, &dump)) {
+        return ENODEV;
+    }
+    while (nl_dump_next(&dump, &msg)) {
+        error = netdev_dev->tc->ops->class_dump_stats(netdev, &msg, cb, aux);
+        if (error) {
+            last_error = error;
+        }
+    }
+
+    /* A dump-level error takes precedence over per-class errors. */
+    error = nl_dump_done(&dump);
+    return error ? error : last_error;
+}
+
+/* Stores 'netdev_''s IPv4 address and netmask in '*address' and '*netmask',
+ * fetching them via SIOCGIFADDR/SIOCGIFNETMASK on a cache miss.  Returns 0 on
+ * success, EADDRNOTAVAIL if no address is assigned, otherwise a positive
+ * errno value. */
+static int
+netdev_linux_get_in4(const struct netdev *netdev_,
+                     struct in_addr *address, struct in_addr *netmask)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev_));
+
+    if (!(netdev_dev->cache_valid & VALID_IN4)) {
+        int error;
+
+        error = netdev_linux_get_ipv4(netdev_, &netdev_dev->address,
+                                      SIOCGIFADDR, "SIOCGIFADDR");
+        if (error) {
+            return error;
+        }
+
+        error = netdev_linux_get_ipv4(netdev_, &netdev_dev->netmask,
+                                      SIOCGIFNETMASK, "SIOCGIFNETMASK");
+        if (error) {
+            return error;
+        }
+
+        netdev_dev->cache_valid |= VALID_IN4;
+    }
+    *address = netdev_dev->address;
+    *netmask = netdev_dev->netmask;
+    return address->s_addr == INADDR_ANY ? EADDRNOTAVAIL : 0;
+}
+
+/* Assigns 'address' and 'netmask' to 'netdev_' via SIOCSIFADDR and
+ * SIOCSIFNETMASK, updating the cache on success.  The netmask is only set
+ * when 'address' is not INADDR_ANY.  Returns 0 on success, otherwise a
+ * positive errno value. */
+static int
+netdev_linux_set_in4(struct netdev *netdev_, struct in_addr address,
+                     struct in_addr netmask)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev_));
+    int error;
+
+    error = do_set_addr(netdev_, SIOCSIFADDR, "SIOCSIFADDR", address);
+    if (!error) {
+        netdev_dev->cache_valid |= VALID_IN4;
+        netdev_dev->address = address;
+        netdev_dev->netmask = netmask;
+        if (address.s_addr != INADDR_ANY) {
+            error = do_set_addr(netdev_, SIOCSIFNETMASK,
+                                "SIOCSIFNETMASK", netmask);
+        }
+    }
+    return error;
+}
+
+/* Parses one line of /proc/net/if_inet6: 32 hex digits of IPv6 address, four
+ * ignored hex fields, then the interface name.  On success stores the address
+ * in '*in6' and the name in 'ifname' and returns true; returns false if the
+ * line does not match. */
+static bool
+parse_if_inet6_line(const char *line,
+                    struct in6_addr *in6, char ifname[16 + 1])
+{
+    uint8_t *s6 = in6->s6_addr;
+#define X8 "%2"SCNx8
+    return sscanf(line,
+                  " "X8 X8 X8 X8 X8 X8 X8 X8 X8 X8 X8 X8 X8 X8 X8 X8
+                  "%*x %*x %*x %*x %16s\n",
+                  &s6[0], &s6[1], &s6[2], &s6[3],
+                  &s6[4], &s6[5], &s6[6], &s6[7],
+                  &s6[8], &s6[9], &s6[10], &s6[11],
+                  &s6[12], &s6[13], &s6[14], &s6[15],
+                  ifname) == 17;
+}
+
+/* Stores in '*in6' the IPv6 address assigned to 'netdev_' according to
+ * /proc/net/if_inet6, or in6addr_any if none is found (or the file cannot be
+ * opened).  Always returns 0.  (The result is cached under VALID_IN6.) */
+static int
+netdev_linux_get_in6(const struct netdev *netdev_, struct in6_addr *in6)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev_));
+    if (!(netdev_dev->cache_valid & VALID_IN6)) {
+        FILE *file;
+        char line[128];
+
+        netdev_dev->in6 = in6addr_any;
+
+        file = fopen("/proc/net/if_inet6", "r");
+        if (file != NULL) {
+            const char *name = netdev_get_name(netdev_);
+            while (fgets(line, sizeof line, file)) {
+                struct in6_addr in6_tmp;
+                char ifname[16 + 1];
+                if (parse_if_inet6_line(line, &in6_tmp, ifname)
+                    && !strcmp(name, ifname))
+                {
+                    /* First matching entry wins. */
+                    netdev_dev->in6 = in6_tmp;
+                    break;
+                }
+            }
+            fclose(file);
+        }
+        netdev_dev->cache_valid |= VALID_IN6;
+    }
+    *in6 = netdev_dev->in6;
+    return 0;
+}
+
+/* Initializes '*sa' as an AF_INET sockaddr holding 'addr' with port 0, for
+ * use in ioctl argument structures (struct ifreq, struct rtentry). */
+static void
+make_in4_sockaddr(struct sockaddr *sa, struct in_addr addr)
+{
+    struct sockaddr_in sin;
+    memset(&sin, 0, sizeof sin);
+    sin.sin_family = AF_INET;
+    sin.sin_addr = addr;
+    sin.sin_port = 0;
+
+    memset(sa, 0, sizeof *sa);
+    memcpy(sa, &sin, sizeof sin);
+}
+
+/* Issues address-setting ioctl 'ioctl_nr' (named 'ioctl_name' for logging)
+ * on 'netdev' with IPv4 address 'addr'.  Returns 0 on success, otherwise a
+ * positive errno value. */
+static int
+do_set_addr(struct netdev *netdev,
+            int ioctl_nr, const char *ioctl_name, struct in_addr addr)
+{
+    struct ifreq ifr;
+    ovs_strzcpy(ifr.ifr_name, netdev_get_name(netdev), sizeof ifr.ifr_name);
+    make_in4_sockaddr(&ifr.ifr_addr, addr);
+
+    return netdev_linux_do_ioctl(netdev_get_name(netdev), &ifr, ioctl_nr,
+                                 ioctl_name);
+}
+
+/* Adds 'router' as a default IP gateway via SIOCADDRT (destination and
+ * genmask 0.0.0.0).  Returns 0 on success, otherwise a positive errno
+ * value. */
+static int
+netdev_linux_add_router(struct netdev *netdev OVS_UNUSED, struct in_addr router)
+{
+    struct in_addr any = { INADDR_ANY };
+    struct rtentry rt;
+    int error;
+
+    memset(&rt, 0, sizeof rt);
+    make_in4_sockaddr(&rt.rt_dst, any);
+    make_in4_sockaddr(&rt.rt_gateway, router);
+    make_in4_sockaddr(&rt.rt_genmask, any);
+    rt.rt_flags = RTF_UP | RTF_GATEWAY;
+    error = ioctl(af_inet_sock, SIOCADDRT, &rt) < 0 ? errno : 0;
+    if (error) {
+        VLOG_WARN("ioctl(SIOCADDRT): %s", strerror(error));
+    }
+    return error;
+}
+
+/* Looks up the next hop toward 'host' in the kernel routing table
+ * (/proc/net/route).  On success stores the gateway (or 0.0.0.0 if 'host' is
+ * directly reachable) in '*next_hop', stores a malloc'd copy of the output
+ * interface name in '*netdev_name' (caller must free), and returns 0.
+ * Returns ENXIO if no route matches, or a positive errno value if the
+ * routing table cannot be read. */
+static int
+netdev_linux_get_next_hop(const struct in_addr *host, struct in_addr *next_hop,
+                          char **netdev_name)
+{
+    static const char fn[] = "/proc/net/route";
+    FILE *stream;
+    char line[256];
+    int ln;
+
+    *netdev_name = NULL;
+    stream = fopen(fn, "r");
+    if (stream == NULL) {
+        int error = errno;      /* Save errno: VLOG may clobber it. */
+        VLOG_WARN_RL(&rl, "%s: open failed: %s", fn, strerror(error));
+        return error;
+    }
+
+    ln = 0;
+    while (fgets(line, sizeof line, stream)) {
+        if (++ln >= 2) {
+            /* Skip the first line, which is the column headers. */
+            char iface[17];
+            ovs_be32 dest, gateway, mask;
+            int refcnt, metric, mtu;
+            unsigned int flags, use, window, irtt;
+
+            if (sscanf(line,
+                       "%16s %"SCNx32" %"SCNx32" %04X %d %u %d %"SCNx32
+                       " %d %u %u\n",
+                       iface, &dest, &gateway, &flags, &refcnt,
+                       &use, &metric, &mask, &mtu, &window, &irtt) != 11) {
+
+                VLOG_WARN_RL(&rl, "%s: could not parse line %d: %s",
+                             fn, ln, line);
+                continue;
+            }
+            if (!(flags & RTF_UP)) {
+                /* Skip routes that aren't up. */
+                continue;
+            }
+
+            /* The output of 'dest', 'mask', and 'gateway' were given in
+             * network byte order, so we don't need any endian conversions
+             * here. */
+            if ((dest & mask) == (host->s_addr & mask)) {
+                if (!gateway) {
+                    /* The host is directly reachable. */
+                    next_hop->s_addr = 0;
+                } else {
+                    /* To reach the host, we must go through a gateway. */
+                    next_hop->s_addr = gateway;
+                }
+                *netdev_name = xstrdup(iface);
+                fclose(stream);
+                return 0;
+            }
+        }
+    }
+
+    fclose(stream);
+    return ENXIO;
+}
+
+/* Adds driver name, driver version, and firmware version of 'netdev' to 'sh',
+ * as reported by the ETHTOOL_GDRVINFO ioctl.  Returns 0 on success, otherwise
+ * a positive errno value.  (The struct ethtool_drvinfo is passed through the
+ * generic ethtool helper via a cast, as the helper takes ethtool_cmd.) */
+static int
+netdev_linux_get_status(const struct netdev *netdev, struct shash *sh)
+{
+    struct ethtool_drvinfo drvinfo;
+    int error;
+
+    memset(&drvinfo, 0, sizeof drvinfo);
+    error = netdev_linux_do_ethtool(netdev_get_name(netdev),
+                                    (struct ethtool_cmd *)&drvinfo,
+                                    ETHTOOL_GDRVINFO,
+                                    "ETHTOOL_GDRVINFO");
+    if (!error) {
+        shash_add(sh, "driver_name", xstrdup(drvinfo.driver));
+        shash_add(sh, "driver_version", xstrdup(drvinfo.version));
+        shash_add(sh, "firmware_version", xstrdup(drvinfo.fw_version));
+    }
+
+    return error;
+}
+
+/* Looks up the ARP table entry for 'ip' on 'netdev'.  If one exists and can
+ * be successfully retrieved, it stores the corresponding MAC address in 'mac'
+ * and returns 0.  Otherwise, it returns a positive errno value; in
+ * particular, ENXIO indicates that there is no ARP table entry for 'ip' on
+ * 'netdev'. */
+static int
+netdev_linux_arp_lookup(const struct netdev *netdev,
+                        ovs_be32 ip, uint8_t mac[ETH_ADDR_LEN])
+{
+    struct arpreq r;
+    struct sockaddr_in sin;
+    int retval;
+
+    memset(&r, 0, sizeof r);
+    memset(&sin, 0, sizeof sin);
+    sin.sin_family = AF_INET;
+    sin.sin_addr.s_addr = ip;
+    sin.sin_port = 0;
+    memcpy(&r.arp_pa, &sin, sizeof sin);
+    r.arp_ha.sa_family = ARPHRD_ETHER;
+    r.arp_flags = 0;
+    ovs_strzcpy(r.arp_dev, netdev_get_name(netdev), sizeof r.arp_dev);
+    COVERAGE_INC(netdev_arp_lookup);
+    retval = ioctl(af_inet_sock, SIOCGARP, &r) < 0 ? errno : 0;
+    if (!retval) {
+        memcpy(mac, r.arp_ha.sa_data, ETH_ADDR_LEN);
+    } else if (retval != ENXIO) {
+        VLOG_WARN_RL(&rl, "%s: could not look up ARP entry for "IP_FMT": %s",
+                     netdev_get_name(netdev), IP_ARGS(&ip), strerror(retval));
+    }
+    return retval;
+}
+
+/* Converts netdev flag bits 'nd' to the corresponding Linux IFF_* bits. */
+static int
+nd_to_iff_flags(enum netdev_flags nd)
+{
+    int iff = 0;
+    if (nd & NETDEV_UP) {
+        iff |= IFF_UP;
+    }
+    if (nd & NETDEV_PROMISC) {
+        iff |= IFF_PROMISC;
+    }
+    return iff;
+}
+
+/* Converts Linux IFF_* bits 'iff' to the corresponding netdev flag bits. */
+static int
+iff_to_nd_flags(int iff)
+{
+    enum netdev_flags nd = 0;
+    if (iff & IFF_UP) {
+        nd |= NETDEV_UP;
+    }
+    if (iff & IFF_PROMISC) {
+        nd |= NETDEV_PROMISC;
+    }
+    return nd;
+}
+
+/* Clears the netdev flags in 'off' and sets those in 'on' for 'netdev',
+ * storing the previous flags in '*old_flagsp'.  Skips the kernel call when
+ * the flags would not change.  Returns 0 on success, otherwise a positive
+ * errno value. */
+static int
+netdev_linux_update_flags(struct netdev *netdev, enum netdev_flags off,
+                          enum netdev_flags on, enum netdev_flags *old_flagsp)
+{
+    int old_flags, new_flags;
+    int error;
+
+    error = get_flags(netdev, &old_flags);
+    if (!error) {
+        *old_flagsp = iff_to_nd_flags(old_flags);
+        new_flags = (old_flags & ~nd_to_iff_flags(off)) | nd_to_iff_flags(on);
+        if (new_flags != old_flags) {
+            error = set_flags(netdev, new_flags);
+        }
+    }
+    return error;
+}
+
+/* Returns the device's change sequence number, which callers can compare
+ * across calls to detect configuration changes. */
+static unsigned int
+netdev_linux_change_seq(const struct netdev *netdev)
+{
+    return netdev_dev_linux_cast(netdev_get_dev(netdev))->change_seq;
+}
+
+/* Expands to an initializer for a struct netdev_class.  NAME, CREATE,
+ * ENUMERATE, and SET_STATS are the only members that differ among the
+ * "system", "tap", and "internal" classes defined below; all other members
+ * use the shared netdev_linux_* implementations. */
+#define NETDEV_LINUX_CLASS(NAME, CREATE, ENUMERATE, SET_STATS) \
+{                                                               \
+    NAME,                                                       \
+                                                                \
+    netdev_linux_init,                                          \
+    netdev_linux_run,                                           \
+    netdev_linux_wait,                                          \
+                                                                \
+    CREATE,                                                     \
+    netdev_linux_destroy,                                       \
+    NULL,                       /* set_config */                \
+    NULL,                       /* config_equal */              \
+                                                                \
+    netdev_linux_open,                                          \
+    netdev_linux_close,                                         \
+                                                                \
+    ENUMERATE,                                                  \
+                                                                \
+    netdev_linux_listen,                                        \
+    netdev_linux_recv,                                          \
+    netdev_linux_recv_wait,                                     \
+    netdev_linux_drain,                                         \
+                                                                \
+    netdev_linux_send,                                          \
+    netdev_linux_send_wait,                                     \
+                                                                \
+    netdev_linux_set_etheraddr,                                 \
+    netdev_linux_get_etheraddr,                                 \
+    netdev_linux_get_mtu,                                       \
+    netdev_linux_get_ifindex,                                   \
+    netdev_linux_get_carrier,                                   \
+    netdev_linux_set_miimon_interval,                           \
+    netdev_linux_get_stats,                                     \
+    SET_STATS,                                                  \
+                                                                \
+    netdev_linux_get_features,                                  \
+    netdev_linux_set_advertisements,                            \
+    netdev_linux_get_vlan_vid,                                  \
+                                                                \
+    netdev_linux_set_policing,                                  \
+    netdev_linux_get_qos_types,                                 \
+    netdev_linux_get_qos_capabilities,                          \
+    netdev_linux_get_qos,                                       \
+    netdev_linux_set_qos,                                       \
+    netdev_linux_get_queue,                                     \
+    netdev_linux_set_queue,                                     \
+    netdev_linux_delete_queue,                                  \
+    netdev_linux_get_queue_stats,                               \
+    netdev_linux_dump_queues,                                   \
+    netdev_linux_dump_queue_stats,                              \
+                                                                \
+    netdev_linux_get_in4,                                       \
+    netdev_linux_set_in4,                                       \
+    netdev_linux_get_in6,                                       \
+    netdev_linux_add_router,                                    \
+    netdev_linux_get_next_hop,                                  \
+    netdev_linux_get_status,                                    \
+    netdev_linux_arp_lookup,                                    \
+                                                                \
+    netdev_linux_update_flags,                                  \
+                                                                \
+    netdev_linux_change_seq                                     \
+}
+
+/* Ordinary Linux network devices. */
+const struct netdev_class netdev_linux_class =
+    NETDEV_LINUX_CLASS(
+        "system",
+        netdev_linux_create,
+        netdev_linux_enumerate,
+        NULL);                  /* set_stats */
+
+/* TAP devices: not enumerable, created on demand. */
+const struct netdev_class netdev_tap_class =
+    NETDEV_LINUX_CLASS(
+        "tap",
+        netdev_linux_create_tap,
+        NULL,                   /* enumerate */
+        NULL);                  /* set_stats */
+
+/* OVS "internal" ports, which additionally support setting stats. */
+const struct netdev_class netdev_internal_class =
+    NETDEV_LINUX_CLASS(
+        "internal",
+        netdev_linux_create,
+        NULL,                   /* enumerate */
+        netdev_vport_set_stats);
+\f
+/* HTB traffic control class. */
+
+#define HTB_N_QUEUES 0xf000
+
+/* Per-netdev state for an HTB qdisc; embeds the generic 'tc'. */
+struct htb {
+    struct tc tc;
+    unsigned int max_rate;      /* In bytes/s. */
+};
+
+/* One HTB class (queue); embeds the generic 'tc_queue'. */
+struct htb_class {
+    struct tc_queue tc_queue;
+    unsigned int min_rate;      /* In bytes/s. */
+    unsigned int max_rate;      /* In bytes/s. */
+    unsigned int burst;         /* In bytes. */
+    unsigned int priority;      /* Lower values are higher priorities. */
+};
+
+/* Returns 'netdev''s tc state downcast to a struct htb.  Only valid when the
+ * installed tc_ops is tc_ops_htb. */
+static struct htb *
+htb_get__(const struct netdev *netdev)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    return CONTAINER_OF(netdev_dev->tc, struct htb, tc);
+}
+
+/* Allocates and registers HTB tc state on 'netdev' with the given
+ * 'max_rate' (bytes/s).  Note: 'max_rate' is narrowed from uint64_t to the
+ * unsigned int field — presumably rates always fit; TODO confirm. */
+static void
+htb_install__(struct netdev *netdev, uint64_t max_rate)
+{
+    struct netdev_dev_linux *netdev_dev =
+        netdev_dev_linux_cast(netdev_get_dev(netdev));
+    struct htb *htb;
+
+    htb = xmalloc(sizeof *htb);
+    tc_init(&htb->tc, &tc_ops_htb);
+    htb->max_rate = max_rate;
+
+    netdev_dev->tc = &htb->tc;
+}
+
+/* Create an HTB qdisc.
+ *
+ * Equivalent to "tc qdisc add dev <dev> root handle 1: htb default 1".
+ * Returns 0 on success, otherwise a positive errno value. */
+static int
+htb_setup_qdisc__(struct netdev *netdev)
+{
+    size_t opt_offset;
+    struct tc_htb_glob opt;
+    struct ofpbuf request;
+    struct tcmsg *tcmsg;
+
+    /* Remove any existing root qdisc first. */
+    tc_del_qdisc(netdev);
+
+    tcmsg = tc_make_request(netdev, RTM_NEWQDISC,
+                            NLM_F_EXCL | NLM_F_CREATE, &request);
+    if (!tcmsg) {
+        return ENODEV;
+    }
+    tcmsg->tcm_handle = tc_make_handle(1, 0);
+    tcmsg->tcm_parent = TC_H_ROOT;
+
+    nl_msg_put_string(&request, TCA_KIND, "htb");
+
+    memset(&opt, 0, sizeof opt);
+    opt.rate2quantum = 10;
+    opt.version = 3;
+    opt.defcls = 1;             /* Unclassified traffic goes to class 1:1. */
+
+    opt_offset = nl_msg_start_nested(&request, TCA_OPTIONS);
+    nl_msg_put_unspec(&request, TCA_HTB_INIT, &opt, sizeof opt);
+    nl_msg_end_nested(&request, opt_offset);
+
+    return tc_transact(&request, NULL);
+}
+
+/* Equivalent to "tc class replace <dev> classid <handle> parent <parent> htb
+ * rate <min_rate>bps ceil <max_rate>bps burst <burst>b prio <priority>".
+ * Returns 0 on success, otherwise a positive errno value. */
+static int
+htb_setup_class__(struct netdev *netdev, unsigned int handle,
+                  unsigned int parent, struct htb_class *class)
+{
+    size_t opt_offset;
+    struct tc_htb_opt opt;
+    struct ofpbuf request;
+    struct tcmsg *tcmsg;
+    int error;
+    /* Initialize defensively: if netdev_get_mtu() fails without storing a
+     * value, 'mtu' would otherwise be read uninitialized below. */
+    int mtu = INT_MAX;
+
+    netdev_get_mtu(netdev, &mtu);
+    if (mtu == INT_MAX) {
+        VLOG_WARN_RL(&rl, "cannot set up HTB on device %s that lacks MTU",
+                     netdev_get_name(netdev));
+        return EINVAL;
+    }
+
+    memset(&opt, 0, sizeof opt);
+    tc_fill_rate(&opt.rate, class->min_rate, mtu);
+    tc_fill_rate(&opt.ceil, class->max_rate, mtu);
+    opt.buffer = tc_calc_buffer(opt.rate.rate, mtu, class->burst);
+    opt.cbuffer = tc_calc_buffer(opt.ceil.rate, mtu, class->burst);
+    opt.prio = class->priority;
+
+    tcmsg = tc_make_request(netdev, RTM_NEWTCLASS, NLM_F_CREATE, &request);
+    if (!tcmsg) {
+        return ENODEV;
+    }
+    tcmsg->tcm_handle = handle;
+    tcmsg->tcm_parent = parent;
+
+    nl_msg_put_string(&request, TCA_KIND, "htb");
+    opt_offset = nl_msg_start_nested(&request, TCA_OPTIONS);
+    nl_msg_put_unspec(&request, TCA_HTB_PARMS, &opt, sizeof opt);
+    tc_put_rtab(&request, TCA_HTB_RTAB, &opt.rate);
+    tc_put_rtab(&request, TCA_HTB_CTAB, &opt.ceil);
+    nl_msg_end_nested(&request, opt_offset);
+
+    error = tc_transact(&request, NULL);
+    if (error) {
+        VLOG_WARN_RL(&rl, "failed to replace %s class %u:%u, parent %u:%u, "
+                     "min_rate=%u max_rate=%u burst=%u prio=%u (%s)",
+                     netdev_get_name(netdev),
+                     tc_get_major(handle), tc_get_minor(handle),
+                     tc_get_major(parent), tc_get_minor(parent),
+                     class->min_rate, class->max_rate,
+                     class->burst, class->priority, strerror(error));
+    }
+    return error;
+}
+
+/* Parses Netlink attributes in 'nl_options' for HTB parameters and stores
+ * them into 'class' (min_rate, max_rate, burst, priority).  Returns 0 on
+ * success, EPROTO if the attributes do not match the expected policy. */
+static int
+htb_parse_tca_options__(struct nlattr *nl_options, struct htb_class *class)
+{
+    static const struct nl_policy tca_htb_policy[] = {
+        [TCA_HTB_PARMS] = { .type = NL_A_UNSPEC, .optional = false,
+                            .min_len = sizeof(struct tc_htb_opt) },
+    };
+
+    struct nlattr *attrs[ARRAY_SIZE(tca_htb_policy)];
+    const struct tc_htb_opt *htb;
+
+    if (!nl_parse_nested(nl_options, tca_htb_policy,
+                         attrs, ARRAY_SIZE(tca_htb_policy))) {
+        VLOG_WARN_RL(&rl, "failed to parse HTB class options");
+        return EPROTO;
+    }
+
+    htb = nl_attr_get(attrs[TCA_HTB_PARMS]);
+    class->min_rate = htb->rate.rate;
+    class->max_rate = htb->ceil.rate;
+    /* The kernel reports the burst in scheduler ticks; convert to bytes. */
+    class->burst = tc_ticks_to_bytes(htb->rate.rate, htb->buffer);
+    class->priority = htb->prio;
+    return 0;
+}
+
+/* Parses a tc class Netlink message 'tcmsg'.  If 'queue_id' is nonnull,
+ * derives the OVS queue id from the class handle (must be 1:<minor> with
+ * 1 <= minor <= HTB_N_QUEUES; queue id is minor - 1).  If 'options' or
+ * 'stats' is nonnull, fills those in too.  Returns 0 on success, otherwise
+ * EPROTO or another positive errno value. */
+static int
+htb_parse_tcmsg__(struct ofpbuf *tcmsg, unsigned int *queue_id,
+                  struct htb_class *options,
+                  struct netdev_queue_stats *stats)
+{
+    struct nlattr *nl_options;
+    unsigned int handle;
+    int error;
+
+    error = tc_parse_class(tcmsg, &handle, &nl_options, stats);
+    if (!error && queue_id) {
+        unsigned int major = tc_get_major(handle);
+        unsigned int minor = tc_get_minor(handle);
+        if (major == 1 && minor > 0 && minor <= HTB_N_QUEUES) {
+            *queue_id = minor - 1;
+        } else {
+            error = EPROTO;
+        }
+    }
+    if (!error && options) {
+        error = htb_parse_tca_options__(nl_options, options);
+    }
+    return error;
+}
+
+/* Parses the qdisc-level configuration in 'details' (the "max-rate" key, in
+ * bits/s) into 'hc'.  If "max-rate" is absent or zero, falls back to the
+ * link speed implied by the device's current features.
+ *
+ * Fixes a mis-encoded character from the previous revision: "&curren;t" had
+ * been substituted for "&current" (HTML-entity corruption), which did not
+ * compile. */
+static void
+htb_parse_qdisc_details__(struct netdev *netdev,
+                          const struct shash *details, struct htb_class *hc)
+{
+    const char *max_rate_s;
+
+    max_rate_s = shash_find_data(details, "max-rate");
+    hc->max_rate = max_rate_s ? strtoull(max_rate_s, NULL, 10) / 8 : 0;
+    if (!hc->max_rate) {
+        uint32_t current;
+
+        netdev_get_features(netdev, &current, NULL, NULL, NULL);
+        hc->max_rate = netdev_features_to_bps(current) / 8;
+    }
+    hc->min_rate = hc->max_rate;
+    hc->burst = 0;
+    hc->priority = 0;
+}
+
+/* Parses the per-class configuration in 'details' ("min-rate", "max-rate",
+ * "burst" in bits or bits/s; "priority") into 'hc', clamping the rates to
+ * the qdisc's maximum and to sane minimums.  Returns 0 on success, EINVAL if
+ * the device lacks an MTU. */
+static int
+htb_parse_class_details__(struct netdev *netdev,
+                          const struct shash *details, struct htb_class *hc)
+{
+    const struct htb *htb = htb_get__(netdev);
+    const char *min_rate_s = shash_find_data(details, "min-rate");
+    const char *max_rate_s = shash_find_data(details, "max-rate");
+    const char *burst_s = shash_find_data(details, "burst");
+    const char *priority_s = shash_find_data(details, "priority");
+    /* Initialize defensively: if netdev_get_mtu() fails without storing a
+     * value, 'mtu' would otherwise be read uninitialized below. */
+    int mtu = INT_MAX;
+
+    netdev_get_mtu(netdev, &mtu);
+    if (mtu == INT_MAX) {
+        VLOG_WARN_RL(&rl, "cannot parse HTB class on device %s that lacks MTU",
+                     netdev_get_name(netdev));
+        return EINVAL;
+    }
+
+    /* HTB requires at least an mtu sized min-rate to send any traffic even
+     * on uncongested links. */
+    hc->min_rate = min_rate_s ? strtoull(min_rate_s, NULL, 10) / 8 : 0;
+    hc->min_rate = MAX(hc->min_rate, mtu);
+    hc->min_rate = MIN(hc->min_rate, htb->max_rate);
+
+    /* max-rate */
+    hc->max_rate = (max_rate_s
+                    ? strtoull(max_rate_s, NULL, 10) / 8
+                    : htb->max_rate);
+    hc->max_rate = MAX(hc->max_rate, hc->min_rate);
+    hc->max_rate = MIN(hc->max_rate, htb->max_rate);
+
+    /* burst
+     *
+     * According to hints in the documentation that I've read, it is important
+     * that 'burst' be at least as big as the largest frame that might be
+     * transmitted.  Also, making 'burst' a bit bigger than necessary is OK,
+     * but having it a bit too small is a problem.  Since netdev_get_mtu()
+     * doesn't include the Ethernet header, we need to add at least 14 (18?) to
+     * the MTU.  We actually add 64, instead of 14, as a guard against
+     * additional headers get tacked on somewhere that we're not aware of. */
+    hc->burst = burst_s ? strtoull(burst_s, NULL, 10) / 8 : 0;
+    hc->burst = MAX(hc->burst, mtu + 64);
+
+    /* priority */
+    hc->priority = priority_s ? strtoul(priority_s, NULL, 10) : 0;
+
+    return 0;
+}
+
+/* Queries the kernel for the HTB class with the given 'handle' and 'parent'
+ * on 'netdev' and parses the reply into 'options' and/or 'stats' (either may
+ * be null).  Returns 0 on success, otherwise a positive errno value. */
+static int
+htb_query_class__(const struct netdev *netdev, unsigned int handle,
+                  unsigned int parent, struct htb_class *options,
+                  struct netdev_queue_stats *stats)
+{
+    struct ofpbuf *reply;
+    int error;
+
+    error = tc_query_class(netdev, handle, parent, &reply);
+    if (!error) {
+        error = htb_parse_tcmsg__(reply, NULL, options, stats);
+        ofpbuf_delete(reply);
+    }
+    return error;
+}
+
+/* tc_ops "tc_install" callback: creates the HTB root qdisc and its default
+ * class 1:fffe, then registers the HTB state on 'netdev'.  Returns 0 on
+ * success, otherwise a positive errno value. */
+static int
+htb_tc_install(struct netdev *netdev, const struct shash *details)
+{
+    int error;
+
+    error = htb_setup_qdisc__(netdev);
+    if (!error) {
+        struct htb_class hc;
+
+        htb_parse_qdisc_details__(netdev, details, &hc);
+        error = htb_setup_class__(netdev, tc_make_handle(1, 0xfffe),
+                                  tc_make_handle(1, 0), &hc);
+        if (!error) {
+            htb_install__(netdev, hc.max_rate);
+        }
+    }
+    return error;
+}
+
+/* Downcasts a generic tc_queue to its containing htb_class. */
+static struct htb_class *
+htb_class_cast__(const struct tc_queue *queue)
+{
+    return CONTAINER_OF(queue, struct htb_class, tc_queue);
+}
+
+/* Updates the cached htb_class for 'queue_id' on 'netdev' with the values in
+ * 'hc', creating and inserting a new entry if none exists yet. */
+static void
+htb_update_queue__(struct netdev *netdev, unsigned int queue_id,
+                   const struct htb_class *hc)
+{
+    struct htb *htb = htb_get__(netdev);
+    size_t hash = hash_int(queue_id, 0);
+    struct tc_queue *queue;
+    struct htb_class *hcp;
+
+    queue = tc_find_queue__(netdev, queue_id, hash);
+    if (queue) {
+        hcp = htb_class_cast__(queue);
+    } else {
+        hcp = xmalloc(sizeof *hcp);
+        queue = &hcp->tc_queue;
+        queue->queue_id = queue_id;
+        hmap_insert(&htb->tc.queues, &queue->hmap_node, hash);
+    }
+
+    hcp->min_rate = hc->min_rate;
+    hcp->max_rate = hc->max_rate;
+    hcp->burst = hc->burst;
+    hcp->priority = hc->priority;
+}
+
+/* tc_ops "tc_load" callback: reconstructs OVS's view of an HTB qdisc that
+ * already exists in the kernel, by querying the default class for the
+ * qdisc-level max-rate and dumping all classes to populate the queue cache.
+ * Returns 0 on success, ENODEV if the class dump cannot be started. */
+static int
+htb_tc_load(struct netdev *netdev, struct ofpbuf *nlmsg OVS_UNUSED)
+{
+    struct ofpbuf msg;
+    struct nl_dump dump;
+    struct htb_class hc;
+
+    /* Get qdisc options. */
+    hc.max_rate = 0;
+    htb_query_class__(netdev, tc_make_handle(1, 0xfffe), 0, &hc, NULL);
+    htb_install__(netdev, hc.max_rate);
+
+    /* Get queues. */
+    if (!start_queue_dump(netdev, &dump)) {
+        return ENODEV;
+    }
+    while (nl_dump_next(&dump, &msg)) {
+        unsigned int queue_id;
+
+        /* Classes that fail to parse (e.g. the default class) are skipped. */
+        if (!htb_parse_tcmsg__(&msg, &queue_id, &hc, NULL)) {
+            htb_update_queue__(netdev, queue_id, &hc);
+        }
+    }
+    nl_dump_done(&dump);
+
+    return 0;
+}
+
+/* tc_ops "tc_destroy" callback: frees every cached htb_class and the htb
+ * structure itself.  Does not touch kernel state. */
+static void
+htb_tc_destroy(struct tc *tc)
+{
+    struct htb *htb = CONTAINER_OF(tc, struct htb, tc);
+    struct htb_class *hc, *next;
+
+    HMAP_FOR_EACH_SAFE (hc, next, tc_queue.hmap_node, &htb->tc.queues) {
+        hmap_remove(&htb->tc.queues, &hc->tc_queue.hmap_node);
+        free(hc);
+    }
+    tc_destroy(tc);
+    free(htb);
+}
+
+/* tc_ops "qdisc_get" callback: reports the qdisc max-rate, converted from
+ * bytes/s to bits/s.  Always returns 0. */
+static int
+htb_qdisc_get(const struct netdev *netdev, struct shash *details)
+{
+    const struct htb *htb = htb_get__(netdev);
+    shash_add(details, "max-rate", xasprintf("%llu", 8ULL * htb->max_rate));
+    return 0;
+}
+
+/* tc_ops "qdisc_set" callback: reconfigures the default class 1:fffe from
+ * 'details' and updates the cached max-rate on success.  Returns 0 on
+ * success, otherwise a positive errno value. */
+static int
+htb_qdisc_set(struct netdev *netdev, const struct shash *details)
+{
+    struct htb_class hc;
+    int error;
+
+    htb_parse_qdisc_details__(netdev, details, &hc);
+    error = htb_setup_class__(netdev, tc_make_handle(1, 0xfffe),
+                              tc_make_handle(1, 0), &hc);
+    if (!error) {
+        htb_get__(netdev)->max_rate = hc.max_rate;
+    }
+    return error;
+}
+
+/* tc_ops "class_get" callback: reports the cached configuration of 'queue'
+ * in 'details', converting rates and burst from bytes to bits.  max-rate and
+ * priority are omitted when they equal their defaults.  Always returns 0. */
+static int
+htb_class_get(const struct netdev *netdev OVS_UNUSED,
+              const struct tc_queue *queue, struct shash *details)
+{
+    const struct htb_class *hc = htb_class_cast__(queue);
+
+    shash_add(details, "min-rate", xasprintf("%llu", 8ULL * hc->min_rate));
+    if (hc->min_rate != hc->max_rate) {
+        shash_add(details, "max-rate", xasprintf("%llu", 8ULL * hc->max_rate));
+    }
+    shash_add(details, "burst", xasprintf("%llu", 8ULL * hc->burst));
+    if (hc->priority) {
+        shash_add(details, "priority", xasprintf("%u", hc->priority));
+    }
+    return 0;
+}
+
+/* tc_ops "class_set" callback: configures kernel class 1:(queue_id+1) under
+ * the default class from 'details', then updates the cache.  Returns 0 on
+ * success, otherwise a positive errno value. */
+static int
+htb_class_set(struct netdev *netdev, unsigned int queue_id,
+              const struct shash *details)
+{
+    struct htb_class hc;
+    int error;
+
+    error = htb_parse_class_details__(netdev, details, &hc);
+    if (error) {
+        return error;
+    }
+
+    /* OVS queue N maps to HTB class 1:(N+1). */
+    error = htb_setup_class__(netdev, tc_make_handle(1, queue_id + 1),
+                              tc_make_handle(1, 0xfffe), &hc);
+    if (error) {
+        return error;
+    }
+
+    htb_update_queue__(netdev, queue_id, &hc);
+    return 0;
+}
+
+/* tc_ops "class_delete" callback: deletes the kernel class for 'queue' and,
+ * on success, removes and frees the cached entry.  Returns 0 on success,
+ * otherwise a positive errno value. */
+static int
+htb_class_delete(struct netdev *netdev, struct tc_queue *queue)
+{
+    struct htb_class *hc = htb_class_cast__(queue);
+    struct htb *htb = htb_get__(netdev);
+    int error;
+
+    error = tc_delete_class(netdev, tc_make_handle(1, queue->queue_id + 1));
+    if (!error) {
+        hmap_remove(&htb->tc.queues, &hc->tc_queue.hmap_node);
+        free(hc);
+    }
+    return error;
+}
+
+/* tc_ops "class_get_stats" callback: queries the kernel for statistics of
+ * the class that corresponds to 'queue'.  Returns 0 on success, otherwise a
+ * positive errno value. */
+static int
+htb_class_get_stats(const struct netdev *netdev, const struct tc_queue *queue,
+                    struct netdev_queue_stats *stats)
+{
+    return htb_query_class__(netdev, tc_make_handle(1, queue->queue_id + 1),
+                             tc_make_handle(1, 0xfffe), NULL, stats);
+}
+
+/* tc_ops "class_dump_stats" callback: parses one dumped tc class message and
+ * invokes 'cb' with its statistics if the handle maps to a valid OVS queue
+ * id (handle 1:minor, 1 <= minor <= HTB_N_QUEUES).  Returns 0 on success or
+ * if the class is skipped, otherwise a positive errno value. */
+static int
+htb_class_dump_stats(const struct netdev *netdev OVS_UNUSED,
+                     const struct ofpbuf *nlmsg,
+                     netdev_dump_queue_stats_cb *cb, void *aux)
+{
+    struct netdev_queue_stats stats;
+    unsigned int handle, major, minor;
+    int error;
+
+    error = tc_parse_class(nlmsg, &handle, NULL, &stats);
+    if (error) {
+        return error;
+    }
+
+    major = tc_get_major(handle);
+    minor = tc_get_minor(handle);
+    if (major == 1 && minor > 0 && minor <= HTB_N_QUEUES) {
+        (*cb)(minor - 1, &stats, aux);
+    }
+    return 0;
+}
+
/* Operations table binding the OVS QoS type "linux-htb" to the kernel's
 * "htb" qdisc.  The three commented members are the name/capacity fields;
 * the rest are the qdisc and class callbacks defined above, listed in the
 * order struct tc_ops declares them (declaration not visible in this
 * chunk — NOTE(review): confirm slot order against struct tc_ops). */
static const struct tc_ops tc_ops_htb = {
    "htb",                      /* linux_name */
    "linux-htb",                /* ovs_name */
    HTB_N_QUEUES,               /* n_queues */
    htb_tc_install,
    htb_tc_load,
    htb_tc_destroy,
    htb_qdisc_get,
    htb_qdisc_set,
    htb_class_get,
    htb_class_set,
    htb_class_delete,
    htb_class_get_stats,
    htb_class_dump_stats
};
+\f
+/* "linux-hfsc" traffic control class. */
+
+#define HFSC_N_QUEUES 0xf000
+
/* Per-netdev state for the "linux-hfsc" traffic control class. */
struct hfsc {
    struct tc tc;               /* Must be first; cast back via hfsc_get__(). */
    uint32_t max_rate;          /* Qdisc-wide maximum rate, presumably in
                                 * bytes/s (details give bits/s, divided by 8
                                 * in hfsc_parse_qdisc_details__) — confirm. */
};
+
/* Per-queue state for the "linux-hfsc" traffic control class. */
struct hfsc_class {
    struct tc_queue tc_queue;   /* Must be first; cast via hfsc_class_cast__(). */
    uint32_t min_rate;          /* Link-share curve slope (fsc->m2); units match
                                 * the kernel service-curve m2 field. */
    uint32_t max_rate;          /* Upper-limit curve slope (usc->m2). */
};
+
+static struct hfsc *
+hfsc_get__(const struct netdev *netdev)
+{
+ struct netdev_dev_linux *netdev_dev;
+ netdev_dev = netdev_dev_linux_cast(netdev_get_dev(netdev));
+ return CONTAINER_OF(netdev_dev->tc, struct hfsc, tc);
+}
+
/* Converts a generic tc_queue pointer back to its enclosing hfsc_class.
 * Valid only for queues created by this traffic control class. */
static struct hfsc_class *
hfsc_class_cast__(const struct tc_queue *queue)
{
    return CONTAINER_OF(queue, struct hfsc_class, tc_queue);
}
+
+static void
+hfsc_install__(struct netdev *netdev, uint32_t max_rate)
+{
+ struct netdev_dev_linux * netdev_dev;
+ struct hfsc *hfsc;
+
+ netdev_dev = netdev_dev_linux_cast(netdev_get_dev(netdev));
+ hfsc = xmalloc(sizeof *hfsc);
+ tc_init(&hfsc->tc, &tc_ops_hfsc);
+ hfsc->max_rate = max_rate;
+ netdev_dev->tc = &hfsc->tc;
+}
+
+static void
+hfsc_update_queue__(struct netdev *netdev, unsigned int queue_id,
+ const struct hfsc_class *hc)
+{
+ size_t hash;
+ struct hfsc *hfsc;
+ struct hfsc_class *hcp;
+ struct tc_queue *queue;
+
+ hfsc = hfsc_get__(netdev);
+ hash = hash_int(queue_id, 0);
+
+ queue = tc_find_queue__(netdev, queue_id, hash);
+ if (queue) {
+ hcp = hfsc_class_cast__(queue);
+ } else {
+ hcp = xmalloc(sizeof *hcp);
+ queue = &hcp->tc_queue;
+ queue->queue_id = queue_id;
+ hmap_insert(&hfsc->tc.queues, &queue->hmap_node, hash);
+ }
+
+ hcp->min_rate = hc->min_rate;
+ hcp->max_rate = hc->max_rate;
+}
+
/* Parses the TCA_OPTIONS attribute 'nl_options' of an HFSC class message
 * into 'class''s min_rate/max_rate.  Returns 0 if successful, otherwise
 * EPROTO.
 *
 * Only the restricted HFSC configurations that OVS itself generates are
 * accepted: all three service curves must be present and linear (m1 == 0,
 * d == 0), the real-time curve must equal the link-share curve, and the
 * link-share slope must not exceed the upper-limit slope. */
static int
hfsc_parse_tca_options__(struct nlattr *nl_options, struct hfsc_class *class)
{
    /* rsc = real-time, fsc = link-share, usc = upper-limit service curve. */
    const struct tc_service_curve *rsc, *fsc, *usc;
    static const struct nl_policy tca_hfsc_policy[] = {
        [TCA_HFSC_RSC] = {
            .type = NL_A_UNSPEC,
            .optional = false,
            .min_len = sizeof(struct tc_service_curve),
        },
        [TCA_HFSC_FSC] = {
            .type = NL_A_UNSPEC,
            .optional = false,
            .min_len = sizeof(struct tc_service_curve),
        },
        [TCA_HFSC_USC] = {
            .type = NL_A_UNSPEC,
            .optional = false,
            .min_len = sizeof(struct tc_service_curve),
        },
    };
    struct nlattr *attrs[ARRAY_SIZE(tca_hfsc_policy)];

    if (!nl_parse_nested(nl_options, tca_hfsc_policy,
                         attrs, ARRAY_SIZE(tca_hfsc_policy))) {
        VLOG_WARN_RL(&rl, "failed to parse HFSC class options");
        return EPROTO;
    }

    rsc = nl_attr_get(attrs[TCA_HFSC_RSC]);
    fsc = nl_attr_get(attrs[TCA_HFSC_FSC]);
    usc = nl_attr_get(attrs[TCA_HFSC_USC]);

    /* A nonzero m1 or d means a two-piece (non-linear) curve, which this
     * module never configures and cannot represent. */
    if (rsc->m1 != 0 || rsc->d != 0 ||
        fsc->m1 != 0 || fsc->d != 0 ||
        usc->m1 != 0 || usc->d != 0) {
        VLOG_WARN_RL(&rl, "failed to parse HFSC class options. "
                     "Non-linear service curves are not supported.");
        return EPROTO;
    }

    /* OVS always sets rsc identical to fsc; a distinct real-time curve
     * implies external configuration we do not understand. */
    if (rsc->m2 != fsc->m2) {
        VLOG_WARN_RL(&rl, "failed to parse HFSC class options. "
                     "Real-time service curves are not supported ");
        return EPROTO;
    }

    if (rsc->m2 > usc->m2) {
        VLOG_WARN_RL(&rl, "failed to parse HFSC class options. "
                     "Min-rate service curve is greater than "
                     "the max-rate service curve.");
        return EPROTO;
    }

    /* Linear curves: the steady-state slope m2 is the rate. */
    class->min_rate = fsc->m2;
    class->max_rate = usc->m2;
    return 0;
}
+
+static int
+hfsc_parse_tcmsg__(struct ofpbuf *tcmsg, unsigned int *queue_id,
+ struct hfsc_class *options,
+ struct netdev_queue_stats *stats)
+{
+ int error;
+ unsigned int handle;
+ struct nlattr *nl_options;
+
+ error = tc_parse_class(tcmsg, &handle, &nl_options, stats);
+ if (error) {
+ return error;
+ }
+
+ if (queue_id) {
+ unsigned int major, minor;
+
+ major = tc_get_major(handle);
+ minor = tc_get_minor(handle);
+ if (major == 1 && minor > 0 && minor <= HFSC_N_QUEUES) {
+ *queue_id = minor - 1;
+ } else {
+ return EPROTO;
+ }
+ }
+
+ if (options) {
+ error = hfsc_parse_tca_options__(nl_options, options);
+ }
+
+ return error;
+}
+
+static int
+hfsc_query_class__(const struct netdev *netdev, unsigned int handle,
+ unsigned int parent, struct hfsc_class *options,
+ struct netdev_queue_stats *stats)
+{
+ int error;
+ struct ofpbuf *reply;
+
+ error = tc_query_class(netdev, handle, parent, &reply);
+ if (error) {
+ return error;
+ }
+
+ error = hfsc_parse_tcmsg__(reply, NULL, options, stats);
+ ofpbuf_delete(reply);
+ return error;
+}
+
+static void
+hfsc_parse_qdisc_details__(struct netdev *netdev, const struct shash *details,
+ struct hfsc_class *class)
+{
+ uint32_t max_rate;
+ const char *max_rate_s;
+
+ max_rate_s = shash_find_data(details, "max-rate");
+ max_rate = max_rate_s ? strtoull(max_rate_s, NULL, 10) / 8 : 0;
+
+ if (!max_rate) {
+ uint32_t current;
+
+ netdev_get_features(netdev, ¤t, NULL, NULL, NULL);
+ max_rate = netdev_features_to_bps(current) / 8;
+ }