X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=e0f1b6632509bddd14e1c267b148ef7e98da7c02;hb=0a6f55420508c31af9fe41aafdd26ce8462cc1be;hp=2f6ca08c60fa8f99c4d23f7afe0ad861568219a9;hpb=c62b0064a0cdbd5f3ecfc2cda90999a9d7ec551d;p=openvswitch diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index 2f6ca08c..e0f1b663 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -59,6 +59,7 @@ #include "svec.h" #include "tag.h" #include "timeval.h" +#include "unaligned.h" #include "unixctl.h" #include "vconn.h" #include "vlog.h" @@ -138,7 +139,7 @@ struct action_xlate_ctx { int recurse; /* Recursion level, via xlate_table_action. */ int last_pop_priority; /* Offset in 'odp_actions' just past most - * recently added ODPAT_SET_PRIORITY. */ + * recent ODP_ACTION_ATTR_SET_PRIORITY. */ }; static void action_xlate_ctx_init(struct action_xlate_ctx *, @@ -301,7 +302,8 @@ struct ofconn { /* OFPT_PACKET_IN related data. */ struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */ - struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */ +#define N_SCHEDULERS 2 + struct pinsched *schedulers[N_SCHEDULERS]; struct pktbuf *pktbuf; /* OpenFlow packet buffers. */ int miss_send_len; /* Bytes to send of buffered packets. */ @@ -319,15 +321,6 @@ struct ofconn { enum ofproto_band band; /* In-band or out-of-band? */ }; -/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's - * "schedulers" array. Their values are 0 and 1, and their meanings and values - * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient. In - * case anything ever changes, check their values here. */ -#define N_SCHEDULERS 2 -BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0); -BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR); -BUILD_ASSERT_DECL(OFPR_ACTION == 1); -BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR); static struct ofconn *ofconn_create(struct ofproto *, struct rconn *, enum ofconn_type); @@ -444,7 +437,10 @@ ofproto_create(const char *datapath, const char *datapath_type, VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error)); return error; } - error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW); + error = dpif_recv_set_mask(dpif, + ((1u << DPIF_UC_MISS) | + (1u << DPIF_UC_ACTION) | + (1u << DPIF_UC_SAMPLE))); if (error) { VLOG_ERR("failed to listen on datapath %s: %s", datapath, strerror(error)); @@ -1368,13 +1364,10 @@ ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, cinfo->pairs.n = 0; - if (last_error == EOF) { - cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; - cinfo->pairs.values[cinfo->pairs.n++] = xstrdup("End of file"); - } else if (last_error > 0) { + if (last_error) { cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; cinfo->pairs.values[cinfo->pairs.n++] = - xstrdup(strerror(last_error)); + xstrdup(ovs_retval_to_string(last_error)); } cinfo->pairs.keys[cinfo->pairs.n] = "state"; @@ -1574,7 +1567,7 @@ make_ofport(const struct dpif_port *dpif_port) return NULL; } - ofport = xmalloc(sizeof *ofport); + ofport = xzalloc(sizeof *ofport); ofport->netdev = netdev; ofport->odp_port = dpif_port->port_no; ofport->opp.port_no = odp_port_to_ofp_port(dpif_port->port_no); @@ -2093,13 +2086,13 @@ execute_odp_actions(struct ofproto *ofproto, const struct flow *flow, struct ofpbuf *packet) { if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) - && odp_actions->nla_type == ODPAT_CONTROLLER) { + && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) { /* As an optimization, avoid a 
round-trip from userspace to kernel to * userspace. This also avoids possibly filling up kernel packet * buffers along the way. */ struct dpif_upcall upcall; - upcall.type = _ODPL_ACTION_NR; + upcall.type = DPIF_UC_ACTION; upcall.packet = packet; upcall.key = NULL; upcall.key_len = 0; @@ -2687,7 +2680,7 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t port) */ } - nl_msg_put_u32(ctx->odp_actions, ODPAT_OUTPUT, port); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_OUTPUT, port); ctx->nf_output_iface = port; } @@ -2738,7 +2731,7 @@ flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask, HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) { uint16_t odp_port = ofport->odp_port; if (odp_port != odp_in_port && !(ofport->opp.config & mask)) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, odp_port); + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port); } } *nf_output_iface = NF_OUT_FLOOD; @@ -2778,7 +2771,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, &ctx->nf_output_iface, ctx->odp_actions); break; case OFPP_CONTROLLER: - nl_msg_put_u64(ctx->odp_actions, ODPAT_CONTROLLER, max_len); + nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len); break; case OFPP_LOCAL: add_output_action(ctx, ODPP_LOCAL); @@ -2825,7 +2818,7 @@ static void add_pop_action(struct action_xlate_ctx *ctx) { if (ctx->odp_actions->size != ctx->last_pop_priority) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_POP_PRIORITY); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_POP_PRIORITY); ctx->last_pop_priority = ctx->odp_actions->size; } } @@ -2856,7 +2849,7 @@ xlate_enqueue_action(struct action_xlate_ctx *ctx, /* Add ODP actions. */ remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); add_output_action(ctx, odp_port); add_pop_action(ctx); @@ -2884,7 +2877,7 @@ xlate_set_queue_action(struct action_xlate_ctx *ctx, } remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); } static void @@ -2892,9 +2885,9 @@ xlate_set_dl_tci(struct action_xlate_ctx *ctx) { ovs_be16 tci = ctx->flow.vlan_tci; if (!(tci & htons(VLAN_CFI))) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_STRIP_VLAN); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_STRIP_VLAN); } else { - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_DL_TCI, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_TCI, tci & ~htons(VLAN_CFI)); } } @@ -2920,7 +2913,8 @@ update_reg_state(struct action_xlate_ctx *ctx, xlate_set_dl_tci(ctx); } if (ctx->flow.tun_id != state->tun_id) { - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, ctx->flow.tun_id); + nl_msg_put_be64(ctx->odp_actions, + ODP_ACTION_ATTR_SET_TUNNEL, ctx->flow.tun_id); } } @@ -2946,13 +2940,14 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL: nast = (const struct nx_action_set_tunnel *) nah; tun_id = htonll(ntohl(nast->tun_id)); - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id); + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); ctx->flow.tun_id = tun_id; break; case NXAST_DROP_SPOOFED_ARP: if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_DROP_SPOOFED_ARP); + nl_msg_put_flag(ctx->odp_actions, + ODP_ACTION_ATTR_DROP_SPOOFED_ARP); } break; @@ -2985,7 +2980,7 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL64: 
tun_id = ((const struct nx_action_set_tunnel64 *) nah)->tun_id; - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id); + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); ctx->flow.tun_id = tun_id; break; @@ -3049,44 +3044,44 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, case OFPAT_SET_DL_SRC: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_SRC, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_SRC, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_DL_DST: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_DST, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_DST, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_NW_SRC: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_SRC, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_SRC, ia->nw_addr.nw_addr); ctx->flow.nw_src = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_DST: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_DST, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_DST, ia->nw_addr.nw_addr); ctx->flow.nw_dst = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_TOS: - nl_msg_put_u8(ctx->odp_actions, ODPAT_SET_NW_TOS, + nl_msg_put_u8(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_TOS, ia->nw_tos.nw_tos); ctx->flow.nw_tos = ia->nw_tos.nw_tos; break; case OFPAT_SET_TP_SRC: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_SRC, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_SRC, ia->tp_port.tp_port); ctx->flow.tp_src = ia->tp_port.tp_port; break; case OFPAT_SET_TP_DST: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_DST, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_DST, ia->tp_port.tp_port); ctx->flow.tp_dst = ia->tp_port.tp_port; break; @@ -3389,8 +3384,8 @@ handle_table_stats_request(struct ofconn *ofconn, ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL)); ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. 
*/ ots->active_count = htonl(classifier_count(&p->cls)); - ots->lookup_count = htonll(0); /* XXX */ - ots->matched_count = htonll(0); /* XXX */ + put_32aligned_be64(&ots->lookup_count, htonll(0)); /* XXX */ + put_32aligned_be64(&ots->matched_count, htonll(0)); /* XXX */ queue_tx(msg, ofconn, ofconn->reply_counter); return 0; @@ -3411,18 +3406,18 @@ append_port_stat(struct ofport *port, struct ofconn *ofconn, ops = append_ofp_stats_reply(sizeof *ops, ofconn, msgp); ops->port_no = htons(port->opp.port_no); memset(ops->pad, 0, sizeof ops->pad); - ops->rx_packets = htonll(stats.rx_packets); - ops->tx_packets = htonll(stats.tx_packets); - ops->rx_bytes = htonll(stats.rx_bytes); - ops->tx_bytes = htonll(stats.tx_bytes); - ops->rx_dropped = htonll(stats.rx_dropped); - ops->tx_dropped = htonll(stats.tx_dropped); - ops->rx_errors = htonll(stats.rx_errors); - ops->tx_errors = htonll(stats.tx_errors); - ops->rx_frame_err = htonll(stats.rx_frame_errors); - ops->rx_over_err = htonll(stats.rx_over_errors); - ops->rx_crc_err = htonll(stats.rx_crc_errors); - ops->collisions = htonll(stats.collisions); + put_32aligned_be64(&ops->rx_packets, htonll(stats.rx_packets)); + put_32aligned_be64(&ops->tx_packets, htonll(stats.tx_packets)); + put_32aligned_be64(&ops->rx_bytes, htonll(stats.rx_bytes)); + put_32aligned_be64(&ops->tx_bytes, htonll(stats.tx_bytes)); + put_32aligned_be64(&ops->rx_dropped, htonll(stats.rx_dropped)); + put_32aligned_be64(&ops->tx_dropped, htonll(stats.tx_dropped)); + put_32aligned_be64(&ops->rx_errors, htonll(stats.rx_errors)); + put_32aligned_be64(&ops->tx_errors, htonll(stats.tx_errors)); + put_32aligned_be64(&ops->rx_frame_err, htonll(stats.rx_frame_errors)); + put_32aligned_be64(&ops->rx_over_err, htonll(stats.rx_over_errors)); + put_32aligned_be64(&ops->rx_crc_err, htonll(stats.rx_crc_errors)); + put_32aligned_be64(&ops->collisions, htonll(stats.collisions)); } static int @@ -3479,7 +3474,7 @@ query_stats(struct ofproto *p, struct rule *rule, ofpbuf_clear(&key); odp_flow_key_from_flow(&key, &facet->flow); - dpif_flow_get(p->dpif, 0, key.data, key.size, NULL, &stats); + dpif_flow_get(p->dpif, key.data, key.size, NULL, &stats); packet_count += stats.n_packets + facet->packet_count; byte_count += stats.n_bytes + facet->byte_count; @@ -3504,6 +3499,7 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, { struct ofp_flow_stats *ofs; uint64_t packet_count, byte_count; + ovs_be64 cookie; size_t act_len, len; if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) { @@ -3520,14 +3516,15 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, ofs->table_id = 0; ofs->pad = 0; ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match, - rule->flow_cookie, &ofs->cookie); + rule->flow_cookie, &cookie); + put_32aligned_be64(&ofs->cookie, cookie); calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec); ofs->priority = htons(rule->cr.priority); ofs->idle_timeout = htons(rule->idle_timeout); ofs->hard_timeout = htons(rule->hard_timeout); memset(ofs->pad2, 0, sizeof ofs->pad2); - ofs->packet_count = htonll(packet_count); - ofs->byte_count = htonll(byte_count); + put_32aligned_be64(&ofs->packet_count, htonll(packet_count)); + put_32aligned_be64(&ofs->byte_count, htonll(byte_count)); if (rule->n_actions > 0) { memcpy(ofs->actions, rule->actions, act_len); } @@ -3707,8 +3704,8 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, } oasr->flow_count = htonl(n_flows); - oasr->packet_count = htonll(total_packets); - 
oasr->byte_count = htonll(total_bytes); + put_32aligned_be64(&oasr->packet_count, htonll(total_packets)); + put_32aligned_be64(&oasr->byte_count, htonll(total_bytes)); memset(oasr->pad, 0, sizeof oasr->pad); } @@ -3781,9 +3778,9 @@ put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id, reply->port_no = htons(cbdata->ofport->opp.port_no); memset(reply->pad, 0, sizeof reply->pad); reply->queue_id = htonl(queue_id); - reply->tx_bytes = htonll(stats->tx_bytes); - reply->tx_packets = htonll(stats->tx_packets); - reply->tx_errors = htonll(stats->tx_errors); + put_32aligned_be64(&reply->tx_bytes, htonll(stats->tx_bytes)); + put_32aligned_be64(&reply->tx_packets, htonll(stats->tx_packets)); + put_32aligned_be64(&reply->tx_errors, htonll(stats->tx_errors)); } static void @@ -4379,7 +4376,7 @@ handle_miss_upcall(struct ofproto *p, struct dpif_upcall *upcall) struct ofpbuf odp_actions; ofpbuf_init(&odp_actions, 32); - nl_msg_put_u32(&odp_actions, ODPAT_OUTPUT, ODPP_LOCAL); + nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, ODPP_LOCAL); dpif_execute(p->dpif, odp_actions.data, odp_actions.size, upcall->packet); ofpbuf_uninit(&odp_actions); @@ -4439,13 +4436,13 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) struct flow flow; switch (upcall->type) { - case _ODPL_ACTION_NR: + case DPIF_UC_ACTION: COVERAGE_INC(ofproto_ctlr_action); odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); send_packet_in(p, upcall, &flow, false); break; - case _ODPL_SFLOW_NR: + case DPIF_UC_SAMPLE: if (p->sflow) { odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); ofproto_sflow_received(p->sflow, upcall, &flow); @@ -4453,10 +4450,11 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) ofpbuf_delete(upcall->packet); break; - case _ODPL_MISS_NR: + case DPIF_UC_MISS: handle_miss_upcall(p, upcall); break; + case DPIF_N_UC_TYPES: default: VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type); break; @@ -4644,36 +4642,18 @@ facet_active_timeout(struct ofproto *ofproto, struct facet *facet) netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) { struct ofexpired expired; - expired.flow = facet->flow; - expired.packet_count = facet->packet_count; - expired.byte_count = facet->byte_count; - expired.used = facet->used; - - /* Get updated flow stats. - * - * XXX We could avoid this call entirely if (1) ofproto_update_used() - * updated TCP flags and (2) the dpif_flow_list_all() in - * ofproto_update_used() zeroed TCP flags. 
*/ if (facet->installed) { - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; struct dpif_flow_stats stats; - struct ofpbuf key; - - ofpbuf_use_stack(&key, keybuf, sizeof keybuf); - odp_flow_key_from_flow(&key, &facet->flow); - - if (!dpif_flow_get(ofproto->dpif, ODPFF_ZERO_TCP_FLAGS, - key.data, key.size, NULL, &stats)) { - expired.packet_count += stats.n_packets; - expired.byte_count += stats.n_bytes; - if (stats.n_packets) { - facet_update_time(ofproto, facet, &stats); - netflow_flow_update_flags(&facet->nf_flow, - stats.tcp_flags); - } - } + + facet_put__(ofproto, facet, facet->actions, facet->actions_len, + &stats); + facet_update_stats(ofproto, facet, &stats); } + expired.flow = facet->flow; + expired.packet_count = facet->packet_count; + expired.byte_count = facet->byte_count; + expired.used = facet->used; netflow_expire(ofproto->netflow, &facet->nf_flow, &expired); } } @@ -4827,9 +4807,10 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, int total_len, send_len; struct ofpbuf *packet; uint32_t buffer_id; + int idx; /* Get OpenFlow buffer_id. */ - if (upcall->type == _ODPL_ACTION_NR) { + if (upcall->type == DPIF_UC_ACTION) { buffer_id = UINT32_MAX; } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) { buffer_id = pktbuf_get_null(); @@ -4844,7 +4825,7 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, if (buffer_id != UINT32_MAX) { send_len = MIN(send_len, ofconn->miss_send_len); } - if (upcall->type == _ODPL_ACTION_NR) { + if (upcall->type == DPIF_UC_ACTION) { send_len = MIN(send_len, upcall->userdata); } @@ -4863,18 +4844,19 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, opi->header.type = OFPT_PACKET_IN; opi->total_len = htons(total_len); opi->in_port = htons(odp_port_to_ofp_port(flow->in_port)); - opi->reason = upcall->type == _ODPL_MISS_NR ? OFPR_NO_MATCH : OFPR_ACTION; + opi->reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION; opi->buffer_id = htonl(buffer_id); update_openflow_length(packet); /* Hand over to packet scheduler. It might immediately call into * do_send_packet_in() or it might buffer it for a while (until a later * call to pinsched_run()). */ - pinsched_send(ofconn->schedulers[opi->reason], flow->in_port, + idx = upcall->type == DPIF_UC_MISS ? 0 : 1; + pinsched_send(ofconn->schedulers[idx], flow->in_port, packet, do_send_packet_in, ofconn); } -/* Given 'upcall', of type _ODPL_ACTION_NR or _ODPL_MISS_NR, sends an +/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to * their individual configurations. * @@ -5132,7 +5114,7 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD, nf_output_iface, odp_actions); } else if (out_port != flow->in_port) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, out_port); + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, out_port); *nf_output_iface = out_port; } else { /* Drop. */
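
Note on the put_32aligned_be64() conversions above: the OpenFlow stats
structures touched here (ofp_table_stats, ofp_port_stats, ofp_flow_stats,
ofp_aggregate_stats_reply, ofp_queue_stats) place 64-bit counters at offsets
that are only guaranteed 4-byte alignment, so a plain 64-bit assignment such
as "ops->rx_packets = htonll(...)" can fault or be miscompiled on
strict-alignment targets.  The helper from the newly included "unaligned.h"
writes the value as two 32-bit halves instead.  The sketch below illustrates
that technique only: the type and function names are stand-ins rather than
the real lib/unaligned.h definitions, and the real helper picks the half
order at compile time rather than at run time.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef uint32_t ovs_be32;   /* 32-bit value in network byte order (assumed). */
typedef uint64_t ovs_be64;   /* 64-bit value in network byte order (assumed). */

/* A 64-bit network-order value that may be only 32-bit aligned, kept as two
 * 32-bit halves so the compiler never issues a 64-bit access against it. */
struct be64_only_32aligned {
    ovs_be32 w0;             /* First four bytes of the network-order value. */
    ovs_be32 w1;             /* Last four bytes of the network-order value. */
};

static bool
host_is_big_endian(void)
{
    const uint16_t probe = 1;
    return *(const uint8_t *) &probe == 0;
}

/* Stores 'value' (already in network byte order, e.g. the result of
 * htonll()) into '*dst' using two 32-bit stores, producing the same byte
 * sequence that a direct 64-bit store of 'value' would. */
static void
put_32aligned_be64_sketch(struct be64_only_32aligned *dst, ovs_be64 value)
{
    if (host_is_big_endian()) {
        dst->w0 = (ovs_be32) (value >> 32);
        dst->w1 = (ovs_be32) value;
    } else {
        dst->w0 = (ovs_be32) value;
        dst->w1 = (ovs_be32) (value >> 32);
    }
}

int
main(void)
{
    /* Mimics "put_32aligned_be64(&ops->rx_packets, htonll(n))" from
     * append_port_stat(), without depending on the OpenFlow headers. */
    ovs_be64 wire = 0x0123456789abcdefULL;   /* Stand-in for htonll() output. */
    struct be64_only_32aligned field;
    uint8_t reference[sizeof wire];

    put_32aligned_be64_sketch(&field, wire);
    memcpy(reference, &wire, sizeof wire);
    assert(!memcmp(&field, reference, sizeof reference));
    return 0;
}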