@mattmacy
Created May 30, 2018 03:15
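Whitespace/style cleanup of sys/net/iflib.c — the diff below looks like an indent(1)/style(9) normalization pass (reindented declarations and struct members, split multi-statement lines, normalized spacing around operators and comments), with no functional changes intended.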
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index f20593ed890..e7bb53ddcc4 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -149,19 +149,19 @@ static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
typedef struct iflib_filter_info {
driver_filter_t *ifi_filter;
- void *ifi_filter_arg;
+ void *ifi_filter_arg;
struct grouptask *ifi_task;
- void *ifi_ctx;
-} *iflib_filter_info_t;
+ void *ifi_ctx;
+} *iflib_filter_info_t;
struct iflib_ctx {
KOBJ_FIELDS;
- /*
- * Pointer to hardware driver's softc
- */
- void *ifc_softc;
+ /*
+ * Pointer to hardware driver's softc
+ */
+ void *ifc_softc;
device_t ifc_dev;
- if_t ifc_ifp;
+ if_t ifc_ifp;
cpuset_t ifc_cpus;
if_shared_ctx_t ifc_sctx;
@@ -177,11 +177,11 @@ struct iflib_ctx {
uint32_t ifc_if_flags;
uint32_t ifc_flags;
uint32_t ifc_max_fl_buf_size;
- int ifc_in_detach;
+ int ifc_in_detach;
- int ifc_link_state;
- int ifc_link_irq;
- int ifc_watchdog_events;
+ int ifc_link_state;
+ int ifc_link_irq;
+ int ifc_watchdog_events;
struct cdev *ifc_led_dev;
struct resource *ifc_msix_mem;
@@ -189,7 +189,7 @@ struct iflib_ctx {
struct grouptask ifc_admin_task;
struct grouptask ifc_vflr_task;
struct iflib_filter_info ifc_filter_info;
- struct ifmedia ifc_media;
+ struct ifmedia ifc_media;
struct sysctl_oid *ifc_sysctl_node;
uint16_t ifc_sysctl_ntxqs;
@@ -197,8 +197,8 @@ struct iflib_ctx {
uint16_t ifc_sysctl_qs_eq_override;
uint16_t ifc_sysctl_rx_budget;
- qidx_t ifc_sysctl_ntxds[8];
- qidx_t ifc_sysctl_nrxds[8];
+ qidx_t ifc_sysctl_ntxds[8];
+ qidx_t ifc_sysctl_nrxds[8];
struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
@@ -212,12 +212,12 @@ struct iflib_ctx {
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
eventhandler_tag ifc_vlan_attach_event;
eventhandler_tag ifc_vlan_detach_event;
- uint8_t ifc_mac[ETHER_ADDR_LEN];
- char ifc_mtx_name[16];
+ uint8_t ifc_mac[ETHER_ADDR_LEN];
+ char ifc_mtx_name[16];
};
-void *
+void *
iflib_get_softc(if_ctx_t ctx)
{
@@ -293,16 +293,16 @@ iflib_get_sctx(if_ctx_t ctx)
#define M_TOOBIG M_PROTO1
typedef struct iflib_sw_rx_desc_array {
- bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
- struct mbuf **ifsd_m; /* pkthdr mbufs */
- caddr_t *ifsd_cl; /* direct cluster pointer for rx */
- uint8_t *ifsd_flags;
+ bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
+ struct mbuf **ifsd_m; /* pkthdr mbufs */
+ caddr_t *ifsd_cl; /* direct cluster pointer for rx */
+ uint8_t *ifsd_flags;
} iflib_rxsd_array_t;
typedef struct iflib_sw_tx_desc_array {
- bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
- struct mbuf **ifsd_m; /* pkthdr mbufs */
- uint8_t *ifsd_flags;
+ bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
+ struct mbuf **ifsd_m; /* pkthdr mbufs */
+ uint8_t *ifsd_flags;
} if_txsd_vec_t;
@@ -331,90 +331,90 @@ typedef struct iflib_sw_tx_desc_array {
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
- qidx_t ift_in_use;
- qidx_t ift_cidx;
- qidx_t ift_cidx_processed;
- qidx_t ift_pidx;
- uint8_t ift_gen;
- uint8_t ift_br_offset;
- uint16_t ift_npending;
- uint16_t ift_db_pending;
- uint16_t ift_rs_pending;
+ qidx_t ift_in_use;
+ qidx_t ift_cidx;
+ qidx_t ift_cidx_processed;
+ qidx_t ift_pidx;
+ uint8_t ift_gen;
+ uint8_t ift_br_offset;
+ uint16_t ift_npending;
+ uint16_t ift_db_pending;
+ uint16_t ift_rs_pending;
/* implicit pad */
- uint8_t ift_txd_size[8];
- uint64_t ift_processed;
- uint64_t ift_cleaned;
- uint64_t ift_cleaned_prev;
+ uint8_t ift_txd_size[8];
+ uint64_t ift_processed;
+ uint64_t ift_cleaned;
+ uint64_t ift_cleaned_prev;
#if MEMORY_LOGGING
- uint64_t ift_enqueued;
- uint64_t ift_dequeued;
+ uint64_t ift_enqueued;
+ uint64_t ift_dequeued;
#endif
- uint64_t ift_no_tx_dma_setup;
- uint64_t ift_no_desc_avail;
- uint64_t ift_mbuf_defrag_failed;
- uint64_t ift_mbuf_defrag;
- uint64_t ift_map_failed;
- uint64_t ift_txd_encap_efbig;
- uint64_t ift_pullups;
+ uint64_t ift_no_tx_dma_setup;
+ uint64_t ift_no_desc_avail;
+ uint64_t ift_mbuf_defrag_failed;
+ uint64_t ift_mbuf_defrag;
+ uint64_t ift_map_failed;
+ uint64_t ift_txd_encap_efbig;
+ uint64_t ift_pullups;
- struct mtx ift_mtx;
- struct mtx ift_db_mtx;
+ struct mtx ift_mtx;
+ struct mtx ift_db_mtx;
/* constant values */
- if_ctx_t ift_ctx;
- struct ifmp_ring *ift_br;
- struct grouptask ift_task;
- qidx_t ift_size;
- uint16_t ift_id;
- struct callout ift_timer;
-
- if_txsd_vec_t ift_sds;
- uint8_t ift_qstatus;
- uint8_t ift_closed;
- uint8_t ift_update_freq;
+ if_ctx_t ift_ctx;
+ struct ifmp_ring *ift_br;
+ struct grouptask ift_task;
+ qidx_t ift_size;
+ uint16_t ift_id;
+ struct callout ift_timer;
+
+ if_txsd_vec_t ift_sds;
+ uint8_t ift_qstatus;
+ uint8_t ift_closed;
+ uint8_t ift_update_freq;
struct iflib_filter_info ift_filter_info;
- bus_dma_tag_t ift_desc_tag;
- bus_dma_tag_t ift_tso_desc_tag;
- iflib_dma_info_t ift_ifdi;
+ bus_dma_tag_t ift_desc_tag;
+ bus_dma_tag_t ift_tso_desc_tag;
+ iflib_dma_info_t ift_ifdi;
#define MTX_NAME_LEN 16
- char ift_mtx_name[MTX_NAME_LEN];
- char ift_db_mtx_name[MTX_NAME_LEN];
- bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
+ char ift_mtx_name[MTX_NAME_LEN];
+ char ift_db_mtx_name[MTX_NAME_LEN];
+ bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
uint64_t ift_cpu_exec_count[256];
#endif
-} __aligned(CACHE_LINE_SIZE);
+} __aligned(CACHE_LINE_SIZE);
struct iflib_fl {
- qidx_t ifl_cidx;
- qidx_t ifl_pidx;
- qidx_t ifl_credits;
- uint8_t ifl_gen;
- uint8_t ifl_rxd_size;
+ qidx_t ifl_cidx;
+ qidx_t ifl_pidx;
+ qidx_t ifl_credits;
+ uint8_t ifl_gen;
+ uint8_t ifl_rxd_size;
#if MEMORY_LOGGING
- uint64_t ifl_m_enqueued;
- uint64_t ifl_m_dequeued;
- uint64_t ifl_cl_enqueued;
- uint64_t ifl_cl_dequeued;
+ uint64_t ifl_m_enqueued;
+ uint64_t ifl_m_dequeued;
+ uint64_t ifl_cl_enqueued;
+ uint64_t ifl_cl_dequeued;
#endif
/* implicit pad */
- bitstr_t *ifl_rx_bitmap;
- qidx_t ifl_fragidx;
+ bitstr_t *ifl_rx_bitmap;
+ qidx_t ifl_fragidx;
/* constant */
- qidx_t ifl_size;
- uint16_t ifl_buf_size;
- uint16_t ifl_cltype;
- uma_zone_t ifl_zone;
- iflib_rxsd_array_t ifl_sds;
- iflib_rxq_t ifl_rxq;
- uint8_t ifl_id;
- bus_dma_tag_t ifl_desc_tag;
- iflib_dma_info_t ifl_ifdi;
- uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
- caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
+ qidx_t ifl_size;
+ uint16_t ifl_buf_size;
+ uint16_t ifl_cltype;
+ uma_zone_t ifl_zone;
+ iflib_rxsd_array_t ifl_sds;
+ iflib_rxq_t ifl_rxq;
+ uint8_t ifl_id;
+ bus_dma_tag_t ifl_desc_tag;
+ iflib_dma_info_t ifl_ifdi;
+ uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
+ caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
-} __aligned(CACHE_LINE_SIZE);
+} __aligned(CACHE_LINE_SIZE);
static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
@@ -441,42 +441,45 @@ get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
struct iflib_rxq {
- /* If there is a separate completion queue -
- * these are the cq cidx and pidx. Otherwise
- * these are unused.
+ /*
+ * If there is a separate completion queue - these are the cq cidx
+ * and pidx. Otherwise these are unused.
*/
- qidx_t ifr_size;
- qidx_t ifr_cq_cidx;
- qidx_t ifr_cq_pidx;
- uint8_t ifr_cq_gen;
- uint8_t ifr_fl_offset;
-
- if_ctx_t ifr_ctx;
- iflib_fl_t ifr_fl;
- uint64_t ifr_rx_irq;
- uint16_t ifr_id;
- uint8_t ifr_lro_enabled;
- uint8_t ifr_nfl;
- uint8_t ifr_ntxqirq;
- uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
- struct lro_ctrl ifr_lc;
- struct grouptask ifr_task;
+ qidx_t ifr_size;
+ qidx_t ifr_cq_cidx;
+ qidx_t ifr_cq_pidx;
+ uint8_t ifr_cq_gen;
+ uint8_t ifr_fl_offset;
+
+ if_ctx_t ifr_ctx;
+ iflib_fl_t ifr_fl;
+ uint64_t ifr_rx_irq;
+ uint16_t ifr_id;
+ uint8_t ifr_lro_enabled;
+ uint8_t ifr_nfl;
+ uint8_t ifr_ntxqirq;
+ uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
+ struct lro_ctrl ifr_lc;
+ struct grouptask ifr_task;
struct iflib_filter_info ifr_filter_info;
- iflib_dma_info_t ifr_ifdi;
+ iflib_dma_info_t ifr_ifdi;
- /* dynamically allocate if any drivers need a value substantially larger than this */
- struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
+ /*
+ * dynamically allocate if any drivers need a value substantially
+ * larger than this
+ */
+ struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
uint64_t ifr_cpu_exec_count[256];
#endif
-} __aligned(CACHE_LINE_SIZE);
+} __aligned(CACHE_LINE_SIZE);
typedef struct if_rxsd {
caddr_t *ifsd_cl;
struct mbuf **ifsd_m;
iflib_fl_t ifsd_fl;
- qidx_t ifsd_cidx;
-} *if_rxsd_t;
+ qidx_t ifsd_cidx;
+} *if_rxsd_t;
/* multiple of word size */
#ifdef __LP64__
@@ -493,10 +496,10 @@ typedef struct if_rxsd {
typedef struct if_pkt_info_pad {
PKT_TYPE pkt_val[PKT_INFO_SIZE];
-} *if_pkt_info_pad_t;
+} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
PKT_TYPE rxd_val[RXD_INFO_SIZE];
-} *if_rxd_info_pad_t;
+} *if_rxd_info_pad_t;
CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
@@ -508,12 +511,19 @@ pkt_info_zero(if_pkt_info_t pi)
if_pkt_info_pad_t pi_pad;
pi_pad = (if_pkt_info_pad_t)pi;
- pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
- pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
+ pi_pad->pkt_val[0] = 0;
+ pi_pad->pkt_val[1] = 0;
+ pi_pad->pkt_val[2] = 0;
+ pi_pad->pkt_val[3] = 0;
+ pi_pad->pkt_val[4] = 0;
+ pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
- pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
- pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
-#endif
+ pi_pad->pkt_val[6] = 0;
+ pi_pad->pkt_val[7] = 0;
+ pi_pad->pkt_val[8] = 0;
+ pi_pad->pkt_val[9] = 0;
+ pi_pad->pkt_val[10] = 0;
+#endif
}
static device_method_t iflib_pseudo_methods[] = {
@@ -535,12 +545,12 @@ rxd_info_zero(if_rxd_info_t ri)
ri_pad = (if_rxd_info_pad_t)ri;
for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
ri_pad->rxd_val[i] = 0;
- ri_pad->rxd_val[i+1] = 0;
- ri_pad->rxd_val[i+2] = 0;
- ri_pad->rxd_val[i+3] = 0;
+ ri_pad->rxd_val[i + 1] = 0;
+ ri_pad->rxd_val[i + 2] = 0;
+ ri_pad->rxd_val[i + 3] = 0;
}
#ifdef __LP64__
- ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
+ ri_pad->rxd_val[RXD_INFO_SIZE - 1] = 0;
#endif
}
@@ -570,7 +580,7 @@ rxd_info_zero(if_rxd_info_t ri)
/* Our boot-time initialization hook */
-static int iflib_module_event_handler(module_t, int, void *);
+static int iflib_module_event_handler(module_t, int, void *);
static moduledata_t iflib_moduledata = {
"iflib",
@@ -592,21 +602,23 @@ TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
-#endif /* !INVARIANTS */
+#endif /* !INVARIANTS */
#endif
static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
- "iflib driver parameters");
+ "iflib driver parameters");
/*
- * XXX need to ensure that this can't accidentally cause the head to be moved backwards
+ * XXX need to ensure that this can't accidentally cause the head to be moved backwards
*/
static int iflib_min_tx_latency = 0;
+
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
- &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
+ &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
+
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
- &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput");
+ &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput");
#if IFLIB_DEBUG_COUNTERS
@@ -620,19 +632,19 @@ static int iflib_fl_refills_large;
static int iflib_tx_frees;
SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
- &iflib_tx_seen, 0, "# tx mbufs seen");
+ &iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
- &iflib_tx_sent, 0, "# tx mbufs sent");
+ &iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
- &iflib_tx_encap, 0, "# tx mbufs encapped");
+ &iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
- &iflib_tx_frees, 0, "# tx frees");
+ &iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
- &iflib_rx_allocs, 0, "# rx allocations");
+ &iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
- &iflib_fl_refills, 0, "# refills");
+ &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
- &iflib_fl_refills_large, 0, "# large refills");
+ &iflib_fl_refills_large, 0, "# large refills");
static int iflib_txq_drain_flushing;
@@ -641,13 +653,13 @@ static int iflib_txq_drain_notready;
static int iflib_txq_drain_encapfail;
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
- &iflib_txq_drain_flushing, 0, "# drain flushes");
+ &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
- &iflib_txq_drain_oactive, 0, "# drain oactives");
+ &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
- &iflib_txq_drain_notready, 0, "# drain notready");
+ &iflib_txq_drain_notready, 0, "# drain notready");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
- &iflib_txq_drain_encapfail, 0, "# drain encap fails");
+ &iflib_txq_drain_encapfail, 0, "# drain encap fails");
static int iflib_encap_load_mbuf_fail;
@@ -656,19 +668,19 @@ static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;
SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
- &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
+ &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
- &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
+ &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
- &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
+ &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
- &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
+ &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_intr_link;
-static int iflib_intr_msix;
+static int iflib_intr_msix;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_zero_len;
@@ -679,49 +691,52 @@ static int iflib_rxd_flush;
static int iflib_verbose_debug;
SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
- &iflib_intr_link, 0, "# intr link calls");
+ &iflib_intr_link, 0, "# intr link calls");
SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
- &iflib_intr_msix, 0, "# intr msix calls");
+ &iflib_intr_msix, 0, "# intr msix calls");
SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
- &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
+ &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
- &iflib_rx_intr_enables, 0, "# rx intr enables");
+ &iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
- &iflib_fast_intrs, 0, "# fast_intr calls");
+ &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
- &iflib_rx_unavail, 0, "# times rxeof called with no available data");
+ &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
- &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
+ &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
- &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
+ &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
- &iflib_rx_if_input, 0, "# times rxeof called if_input");
+ &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
- &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
+ &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
- &iflib_rxd_flush, 0, "# times rxd_flush called");
+ &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
- &iflib_verbose_debug, 0, "enable verbose debugging");
+ &iflib_verbose_debug, 0, "enable verbose debugging");
#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
- iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
- iflib_txq_drain_flushing = iflib_txq_drain_oactive =
- iflib_txq_drain_notready = iflib_txq_drain_encapfail =
- iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
- iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
- iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
- iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
- iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
- iflib_rx_mbuf_null = iflib_rxd_flush = 0;
+ iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
+ iflib_txq_drain_flushing = iflib_txq_drain_oactive =
+ iflib_txq_drain_notready = iflib_txq_drain_encapfail =
+ iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
+ iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
+ iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
+ iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
+ iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
+ iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}
#else
#define DBG_COUNTER_INC(name)
-static void iflib_debug_reset(void) {}
+static void
+iflib_debug_reset(void)
+{
+}
#endif
#define IFLIB_DEBUG 0
@@ -744,7 +759,7 @@ static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_if_init_locked(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
-static struct mbuf * iflib_fixup_rx(struct mbuf *m);
+static struct mbuf *iflib_fixup_rx(struct mbuf *m);
#endif
NETDUMP_DEFINE(iflib);
@@ -774,11 +789,13 @@ SYSCTL_DECL(_dev_netmap);
* The xl driver by default strips CRCs and we do not override it.
*/
-int iflib_crcstrip = 1;
+int iflib_crcstrip = 1;
+
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");
-int iflib_rx_miss, iflib_rx_miss_bufs;
+int iflib_rx_miss, iflib_rx_miss_bufs;
+
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
@@ -812,8 +829,9 @@ iflib_netmap_register(struct netmap_adapter *na, int onoff)
}
iflib_stop(ctx);
iflib_init_locked(ctx);
- IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
- status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
+ IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
+ //XXX why twice ?
+ status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
if (status)
nm_clear_native_flags(na);
CTX_UNLOCK(ctx);
@@ -835,7 +853,7 @@ netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, boo
if (nm_i == head && __predict_true(!init))
return 0;
- iru_init(&iru, rxq, 0 /* flid */);
+ iru_init(&iru, rxq, 0 /* flid */ );
map = fl->ifl_sds.ifsd_map;
refill_pidx = netmap_idx_k2n(kring, nm_i);
/*
@@ -849,12 +867,13 @@ netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, boo
struct netmap_slot *slot = &ring->slot[nm_i];
void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
uint32_t nic_i_dma = refill_pidx;
+
nic_i = netmap_idx_k2n(kring, nm_i);
MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
- if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
- return netmap_ring_reinit(kring);
+ if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
+ return netmap_ring_reinit(kring);
fl->ifl_vm_addrs[tmp_pidx] = addr;
if (__predict_false(init) && map) {
@@ -867,11 +886,11 @@ netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, boo
nm_i = nm_next(nm_i, lim);
fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
- if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
+ if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH - 1)
continue;
iru.iru_pidx = refill_pidx;
- iru.iru_count = tmp_pidx+1;
+ iru.iru_count = tmp_pidx + 1;
ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
refill_pidx = nic_i;
@@ -880,8 +899,11 @@ netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, boo
for (int n = 0; n < iru.iru_count; n++) {
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
- BUS_DMASYNC_PREREAD);
- /* XXX - change this to not use the netmap func*/
+ BUS_DMASYNC_PREREAD);
+ /*
+ * XXX - change this to not use the netmap
+ * func
+ */
nic_i_dma = nm_next(nic_i_dma, lim);
}
}
@@ -890,7 +912,7 @@ netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, boo
if (map)
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
if (__predict_true(nic_i != UINT_MAX))
ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
return (0);
@@ -916,8 +938,8 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
struct netmap_adapter *na = kring->na;
struct ifnet *ifp = na->ifp;
struct netmap_ring *ring = kring->ring;
- u_int nm_i; /* index into the netmap ring */
- u_int nic_i; /* index into the NIC ring */
+ u_int nm_i; /* index into the netmap ring */
+ u_int nic_i; /* index into the NIC ring */
u_int n;
u_int const lim = kring->nkr_num_slots - 1;
u_int const head = kring->rhead;
@@ -928,13 +950,14 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
* them every half ring, or where NS_REPORT is set
*/
u_int report_frequency = kring->nkr_num_slots >> 1;
+
/* device-specific */
if_ctx_t ctx = ifp->if_softc;
iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
if (txq->ift_sds.ifsd_map)
bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/*
@@ -960,7 +983,7 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
*/
nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
- if (nm_i != head) { /* we have new packets to send */
+ if (nm_i != head) { /* we have new packets to send */
pkt_info_zero(&pi);
pi.ipi_segs = txq->ift_segs;
pi.ipi_qsidx = kring->ring_id;
@@ -977,8 +1000,8 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
uint64_t paddr;
void *addr = PNMB(na, slot, &paddr);
int flags = (slot->flags & NS_REPORT ||
- nic_i == 0 || nic_i == report_frequency) ?
- IPI_TX_INTR : 0;
+ nic_i == 0 || nic_i == report_frequency) ?
+ IPI_TX_INTR : 0;
/* device-specific */
pi.ipi_len = len;
@@ -1004,9 +1027,12 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
/* buffer has changed, reload map */
netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
}
- /* make sure changes to the buffer are synced */
+ /*
+ * make sure changes to the buffer are
+ * synced
+ */
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
- BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREWRITE);
}
slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
nm_i = nm_next(nm_i, lim);
@@ -1017,12 +1043,11 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
/* synchronize the NIC ring */
if (txq->ift_sds.ifsd_map)
bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* (re)start the tx unit up to slot nic_i (excluded) */
ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
}
-
/*
* Second part: reclaim buffers for completed transmissions.
*
@@ -1036,8 +1061,7 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
/* some tx completed, increment avail */
nic_i = txq->ift_cidx_processed;
kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
- }
- else {
+ } else {
if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
DELAY(1);
GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txq->ift_id].ift_task);
@@ -1065,8 +1089,8 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
struct netmap_ring *ring = kring->ring;
- uint32_t nm_i; /* index into the netmap ring */
- uint32_t nic_i; /* index into the NIC ring */
+ uint32_t nm_i; /* index into the netmap ring */
+ uint32_t nic_i; /* index into the NIC ring */
u_int i, n;
u_int const lim = kring->nkr_num_slots - 1;
u_int const head = netmap_idx_n2k(kring, kring->rhead);
@@ -1077,6 +1101,7 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
if_ctx_t ctx = ifp->if_softc;
iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
iflib_fl_t fl = rxq->ifr_fl;
+
if (head > lim)
return netmap_ring_reinit(kring);
@@ -1085,7 +1110,7 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
if (fl->ifl_sds.ifsd_map == NULL)
continue;
bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}
/*
* First part: import newly received packets.
@@ -1123,14 +1148,14 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
ring->slot[nm_i].flags = 0;
if (fl->ifl_sds.ifsd_map)
bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
- fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
+ fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
- if (n) { /* update the state variables */
+ if (n) { /* update the state variables */
if (netmap_no_pendintr && !force_update) {
/* diagnostics */
- iflib_rx_miss ++;
+ iflib_rx_miss++;
iflib_rx_miss_bufs += n;
}
fl->ifl_cidx = nic_i;
@@ -1215,6 +1240,7 @@ iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
* netmap slot index, si
*/
int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
+
netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
}
}
@@ -1251,14 +1277,14 @@ iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
static __inline void
prefetch(void *x)
{
- __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+ __asm volatile ("prefetcht0 %0"::"m" (*(unsigned long *)x));
}
static __inline void
prefetch2cachelines(void *x)
{
- __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+ __asm volatile ("prefetcht0 %0"::"m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
- __asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
+ __asm volatile ("prefetcht0 %0"::"m" (*(((unsigned long *)x) + CACHE_LINE_SIZE / (sizeof(unsigned long)))));
#endif
}
#else
@@ -1271,8 +1297,8 @@ iflib_gen_mac(if_ctx_t ctx)
{
struct thread *td;
MD5_CTX mdctx;
- char uuid[HOSTUUIDLEN+1];
- char buf[HOSTUUIDLEN+16];
+ char uuid[HOSTUUIDLEN + 1];
+ char buf[HOSTUUIDLEN + 16];
uint8_t *mac;
unsigned char digest[16];
@@ -1280,7 +1306,7 @@ iflib_gen_mac(if_ctx_t ctx)
mac = ctx->ifc_mac;
uuid[HOSTUUIDLEN] = 0;
bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
- snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
+ snprintf(buf, HOSTUUIDLEN + 16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
/*
* Generate a pseudo-random, deterministic MAC
* address based on the UUID and unit number.
@@ -1317,7 +1343,7 @@ _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
if (err)
return;
- *(bus_addr_t *) arg = segs[0].ds_addr;
+ *(bus_addr_t *)arg = segs[0].ds_addr;
}
int
@@ -1329,26 +1355,25 @@ iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
- err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- sctx->isc_q_align, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- size, /* maxsize */
- 1, /* nsegments */
- size, /* maxsegsize */
- BUS_DMA_ALLOCNOW, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockarg */
- &dma->idi_tag);
+ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ sctx->isc_q_align, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &dma->idi_tag);
if (err) {
device_printf(dev,
"%s: bus_dma_tag_create failed: %d\n",
__func__, err);
goto fail_0;
}
-
- err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
+ err = bus_dmamem_alloc(dma->idi_tag, (void **)&dma->idi_vaddr,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
if (err) {
device_printf(dev,
@@ -1356,7 +1381,6 @@ iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
__func__, (uintmax_t)size, err);
goto fail_1;
}
-
dma->idi_paddr = IF_BAD_DMA;
err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
@@ -1366,7 +1390,6 @@ iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
__func__, err);
goto fail_2;
}
-
dma->idi_size = size;
return (0);
@@ -1445,7 +1468,7 @@ iflib_record_started(void *arg)
}
SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
- iflib_record_started, NULL);
+ iflib_record_started, NULL);
#endif
static int
@@ -1453,6 +1476,7 @@ iflib_fast_intr(void *arg)
{
iflib_filter_info_t info = arg;
struct grouptask *gtask = info->ifi_task;
+
if (!iflib_started)
return (FILTER_HANDLED);
@@ -1523,8 +1547,8 @@ iflib_fast_intr_ctx(void *arg)
static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
- driver_filter_t filter, driver_intr_t handler, void *arg,
- char *name)
+ driver_filter_t filter, driver_intr_t handler, void *arg,
+ char *name)
{
int rc, flags;
struct resource *res;
@@ -1545,11 +1569,11 @@ _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
irq->ii_res = res;
KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
- filter, handler, arg, &tag);
+ filter, handler, arg, &tag);
if (rc != 0) {
device_printf(dev,
"failed to setup interrupt for rid %d, name %s: %d\n",
- rid, name ? name : "unknown", rc);
+ rid, name ? name : "unknown", rc);
return (rc);
} else if (name)
bus_describe_intr(dev, res, tag, "%s", name);
@@ -1586,65 +1610,63 @@ iflib_txsd_alloc(iflib_txq_t txq)
* Setup DMA descriptor areas.
*/
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- sctx->isc_tx_maxsize, /* maxsize */
- nsegments, /* nsegments */
- sctx->isc_tx_maxsegsize, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txq->ift_desc_tag))) {
- device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
- device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ sctx->isc_tx_maxsize, /* maxsize */
+ nsegments, /* nsegments */
+ sctx->isc_tx_maxsegsize, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &txq->ift_desc_tag))) {
+ device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
+ device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
(uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
goto fail;
}
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- scctx->isc_tx_tso_size_max, /* maxsize */
- ntsosegments, /* nsegments */
- scctx->isc_tx_tso_segsize_max, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txq->ift_tso_desc_tag))) {
- device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ scctx->isc_tx_tso_size_max, /* maxsize */
+ ntsosegments, /* nsegments */
+ scctx->isc_tx_tso_segsize_max, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &txq->ift_tso_desc_tag))) {
+ device_printf(dev, "Unable to allocate TX TSO DMA tag: %d\n", err);
goto fail;
}
if (!(txq->ift_sds.ifsd_flags =
- (uint8_t *) malloc(sizeof(uint8_t) *
+ (uint8_t *)malloc(sizeof(uint8_t) *
scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(txq->ift_sds.ifsd_m =
- (struct mbuf **) malloc(sizeof(struct mbuf *) *
+ (struct mbuf **)malloc(sizeof(struct mbuf *) *
scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
-
- /* Create the descriptor buffer dma maps */
+ /* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
if ((ctx->ifc_flags & IFC_DMAR) == 0)
return (0);
if (!(txq->ift_sds.ifsd_map =
- (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (bus_dmamap_t *)malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
err = ENOMEM;
goto fail;
}
-
for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
if (err != 0) {
@@ -1715,10 +1737,10 @@ iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
if (txq->ift_sds.ifsd_map != NULL) {
bus_dmamap_sync(txq->ift_desc_tag,
- txq->ift_sds.ifsd_map[i],
- BUS_DMASYNC_POSTWRITE);
+ txq->ift_sds.ifsd_map[i],
+ BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->ift_desc_tag,
- txq->ift_sds.ifsd_map[i]);
+ txq->ift_sds.ifsd_map[i]);
}
m_free(*mp);
DBG_COUNTER_INC(tx_frees);
@@ -1749,7 +1771,7 @@ iflib_txq_setup(iflib_txq_t txq)
IFDI_TXQ_SETUP(ctx, txq->ift_id);
for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
bus_dmamap_sync(di->idi_tag, di->idi_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
@@ -1769,65 +1791,64 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
iflib_fl_t fl;
- int err;
+ int err;
MPASS(scctx->isc_nrxd[0] > 0);
MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
fl = rxq->ifr_fl;
- for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
- fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
- err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- sctx->isc_rx_maxsize, /* maxsize */
- sctx->isc_rx_nsegments, /* nsegments */
- sctx->isc_rx_maxsegsize, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockarg */
- &fl->ifl_desc_tag);
+ for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
+ fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't
+ * necessarily the same */
+ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ sctx->isc_rx_maxsize, /* maxsize */
+ sctx->isc_rx_nsegments, /* nsegments */
+ sctx->isc_rx_maxsegsize, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &fl->ifl_desc_tag);
if (err) {
device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
- __func__, err);
+ __func__, err);
goto fail;
}
if (!(fl->ifl_sds.ifsd_flags =
- (uint8_t *) malloc(sizeof(uint8_t) *
- scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (uint8_t *)malloc(sizeof(uint8_t) *
+ scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(fl->ifl_sds.ifsd_m =
- (struct mbuf **) malloc(sizeof(struct mbuf *) *
- scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (struct mbuf **)malloc(sizeof(struct mbuf *) *
+ scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(fl->ifl_sds.ifsd_cl =
- (caddr_t *) malloc(sizeof(caddr_t) *
- scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (caddr_t *)malloc(sizeof(caddr_t) *
+ scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
-
/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
if ((ctx->ifc_flags & IFC_DMAR) == 0)
continue;
if (!(fl->ifl_sds.ifsd_map =
- (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (bus_dmamap_t *)malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
err = ENOMEM;
goto fail;
}
-
for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
if (err != 0) {
@@ -1850,9 +1871,9 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
*/
struct rxq_refill_cb_arg {
- int error;
+ int error;
bus_dma_segment_t seg;
- int nseg;
+ int nseg;
};
static void
@@ -1886,7 +1907,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
{
struct mbuf *m;
int idx, frag_idx = fl->ifl_fragidx;
- int pidx = fl->ifl_pidx;
+ int pidx = fl->ifl_pidx;
caddr_t cl, *sd_cl;
struct mbuf **sd_m;
uint8_t *sd_flags;
@@ -1904,7 +1925,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
idx = pidx;
credits = fl->ifl_credits;
- n = count;
+ n = count;
MPASS(n > 0);
MPASS(credits + n <= fl->ifl_size);
@@ -1926,11 +1947,11 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
*
* If the cluster is still set then we know a minimum sized packet was received
*/
- bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx);
+ bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx);
if ((frag_idx < 0) || (frag_idx >= fl->ifl_size))
- bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
+ bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
if ((cl = sd_cl[frag_idx]) == NULL) {
- if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
+ if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
break;
#if MEMORY_LOGGING
fl->ifl_cl_enqueued++;
@@ -1956,9 +1977,9 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
MPASS(sd_map != NULL);
MPASS(sd_map[frag_idx] != NULL);
err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
- cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
+ cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
- BUS_DMASYNC_PREREAD);
+ BUS_DMASYNC_PREREAD);
if (err != 0 || cb_arg.error) {
/*
@@ -1972,7 +1993,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
}
bus_addr = cb_arg.seg.ds_addr;
}
- bit_set(fl->ifl_rx_bitmap, frag_idx);
+ bit_set(fl->ifl_rx_bitmap, frag_idx);
sd_flags[frag_idx] |= RX_SW_DESC_INUSE;
MPASS(sd_m[frag_idx] == NULL);
@@ -1997,7 +2018,6 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
fl->ifl_pidx = idx;
fl->ifl_credits = credits;
}
-
}
done:
if (i) {
@@ -2015,7 +2035,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
if (sd_map)
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
fl->ifl_fragidx = frag_idx;
}
@@ -2050,6 +2070,7 @@ iflib_fl_bufs_free(iflib_fl_t fl)
if (*sd_flags & RX_SW_DESC_INUSE) {
if (fl->ifl_sds.ifsd_map != NULL) {
bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
+
bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
if (fl->ifl_rxq->ifr_ctx->ifc_in_detach)
bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
@@ -2128,7 +2149,8 @@ iflib_fl_setup(iflib_fl_t fl)
fl->ifl_zone = m_getzone(fl->ifl_buf_size);
- /* avoid pre-allocating zillions of clusters to an idle card
+ /*
+ * avoid pre-allocating zillions of clusters to an idle card
* potentially speeding up attach
*/
_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
@@ -2198,7 +2220,7 @@ iflib_timer(void *arg)
IFDI_TIMER(ctx, txq->ift_id);
if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
((txq->ift_cleaned_prev == txq->ift_cleaned) ||
- (sctx->isc_pause_frames == 0)))
+ (sctx->isc_pause_frames == 0)))
goto hung;
if (ifmp_ring_is_stalled(txq->ift_br))
@@ -2209,15 +2231,15 @@ iflib_timer(void *arg)
GROUPTASK_ENQUEUE(&txq->ift_task);
sctx->isc_pause_frames = 0;
- if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
- callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
+ if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
+ callout_reset_on(&txq->ift_timer, hz / 2, iflib_timer, txq, txq->ift_timer.c_cpu);
return;
- hung:
- device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
- txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
+hung:
+ device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
+ txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
STATE_LOCK(ctx);
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
- ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
+ ctx->ifc_flags |= (IFC_DO_WATCHDOG | IFC_DO_RESET);
iflib_admin_intr_deferred(ctx);
STATE_UNLOCK(ctx);
}
@@ -2244,7 +2266,7 @@ iflib_init_locked(if_ctx_t ctx)
if (if_getcapenable(ifp) & IFCAP_TXCSUM)
if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
- if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
+ if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
if (if_getcapenable(ifp) & IFCAP_TSO4)
if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
if (if_getcapenable(ifp) & IFCAP_TSO6)
@@ -2275,13 +2297,13 @@ iflib_init_locked(if_ctx_t ctx)
}
}
}
- done:
+done:
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
IFDI_INTR_ENABLE(ctx);
txq = ctx->ifc_txqs;
for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
- callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
- txq->ift_timer.c_cpu);
+ callout_reset_on(&txq->ift_timer, hz / 2, iflib_timer, txq,
+ txq->ift_timer.c_cpu);
}
static int
@@ -2329,7 +2351,10 @@ iflib_stop(if_ctx_t ctx)
iflib_debug_reset();
/* Wait for current tx queue users to exit to disarm watchdog timer. */
for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
- /* make sure all transmitters have completed before proceeding XXX */
+ /*
+ * make sure all transmitters have completed before
+ * proceeding XXX
+ */
CALLOUT_LOCK(txq);
callout_stop(&txq->ift_timer);
@@ -2351,7 +2376,10 @@ iflib_stop(if_ctx_t ctx)
bzero((void *)di->idi_vaddr, di->idi_size);
}
for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
- /* make sure all transmitters have completed before proceeding XXX */
+ /*
+ * make sure all transmitters have completed before
+ * proceeding XXX
+ */
for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++)
bzero((void *)di->idi_vaddr, di->idi_size);
@@ -2374,8 +2402,8 @@ calc_next_rxd(iflib_fl_t fl, int cidx)
if (__predict_false(size == 0))
return (start);
- cur = start + size*cidx;
- end = start + size*nrxd;
+ cur = start + size * cidx;
+ end = start + size * nrxd;
next = CACHE_PTR_NEXT(cur);
return (next < end ? next : start);
}
@@ -2388,19 +2416,19 @@ prefetch_pkts(iflib_fl_t fl, int cidx)
caddr_t next_rxd;
- nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
+ nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd - 1);
prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
next_rxd = calc_next_rxd(fl, cidx);
prefetch(next_rxd);
- prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
- prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
- prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
- prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
- prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
- prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
- prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
- prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
+ prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd - 1)]);
+ prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd - 1)]);
+ prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd - 1)]);
+ prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd - 1)]);
+ prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd - 1)]);
+ prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd - 1)]);
+ prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd - 1)]);
+ prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd - 1)]);
}
static void
@@ -2427,33 +2455,36 @@ rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
prefetch_pkts(fl, cidx);
if (fl->ifl_sds.ifsd_map != NULL) {
- next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
+ next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size - 1);
prefetch(&fl->ifl_sds.ifsd_map[next]);
map = fl->ifl_sds.ifsd_map[cidx];
di = fl->ifl_ifdi;
- next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
+ next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size - 1);
prefetch(&fl->ifl_sds.ifsd_flags[next]);
bus_dmamap_sync(di->idi_tag, di->idi_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- /* not valid assert if bxe really does SGE from non-contiguous elements */
+ /*
+ * not valid assert if bxe really does SGE from
+ * non-contiguous elements
+ */
MPASS(fl->ifl_cidx == cidx);
if (unload)
bus_dmamap_unload(fl->ifl_desc_tag, map);
}
- fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
+ fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size - 1);
if (__predict_false(fl->ifl_cidx == 0))
fl->ifl_gen = 0;
if (map != NULL)
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- bit_clear(fl->ifl_rx_bitmap, cidx);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ bit_clear(fl->ifl_rx_bitmap, cidx);
}
static struct mbuf *
assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
{
- int i, padlen , flags;
+ int i, padlen, flags;
struct mbuf *m, *mh, *mt;
caddr_t cl;
@@ -2467,7 +2498,10 @@ assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
/* Don't include zero-length frags */
if (ri->iri_frags[i].irf_len == 0) {
- /* XXX we can save the cluster here, but not the mbuf */
+ /*
+ * XXX we can save the cluster here, but not the
+ * mbuf
+ */
m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
m_free(*sd->ifsd_m);
*sd->ifsd_m = NULL;
@@ -2476,7 +2510,7 @@ assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
m = *sd->ifsd_m;
*sd->ifsd_m = NULL;
if (mh == NULL) {
- flags = M_PKTHDR|M_EXT;
+ flags = M_PKTHDR | M_EXT;
mh = mt = m;
padlen = ri->iri_pad;
} else {
@@ -2512,7 +2546,10 @@ iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
struct if_rxsd sd;
struct mbuf *m;
- /* should I merge this back in now that the two paths are basically duplicated? */
+ /*
+ * should I merge this back in now that the two paths are basically
+ * duplicated?
+ */
if (ri->iri_nfrags == 1 &&
ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
@@ -2525,7 +2562,7 @@ iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
#endif
memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
m->m_len = ri->iri_frags[0].irf_len;
- } else {
+ } else {
m = assemble_segments(rxq, ri, &sd);
}
m->m_pkthdr.len = ri->iri_len;
@@ -2541,7 +2578,7 @@ iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
#if defined(INET6) || defined(INET)
static void
-iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
+iflib_get_ip_forwarding(struct lro_ctrl *lc, bool * v4, bool * v6)
{
CURVNET_SET(lc->ifp->if_vnet);
#if defined(INET6)
@@ -2559,8 +2596,7 @@ iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
* would not return zero.
*/
static bool
-iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
-{
+iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding){
struct ether_header *eh;
uint16_t eh_type;
@@ -2568,12 +2604,12 @@ iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
eh_type = ntohs(eh->ether_type);
switch (eh_type) {
#if defined(INET6)
- case ETHERTYPE_IPV6:
- return !v6_forwarding;
+ case ETHERTYPE_IPV6:
+ return !v6_forwarding;
#endif
#if defined (INET)
- case ETHERTYPE_IP:
- return !v4_forwarding;
+ case ETHERTYPE_IP:
+ return !v4_forwarding;
#endif
}
@@ -2581,14 +2617,13 @@ iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
}
#else
static void
-iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
+iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool * v4 __unused, bool * v6 __unused)
{
}
#endif
static bool
-iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
-{
+iflib_rxeof(iflib_rxq_t rxq, qidx_t budget){
if_ctx_t ctx = rxq->ifr_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
@@ -2611,7 +2646,7 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
ifp = ctx->ifc_ifp;
mh = mt = NULL;
MPASS(budget > 0);
- rx_pkts = rx_bytes = 0;
+ rx_pkts = rx_bytes = 0;
if (sctx->isc_flags & IFLIB_HAS_RXCQ)
cidxp = &rxq->ifr_cq_cidx;
else
@@ -2622,7 +2657,6 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
DBG_COUNTER_INC(rx_unavail);
return (false);
}
-
for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) {
if (__predict_false(!CTX_ACTIVE(ctx))) {
DBG_COUNTER_INC(rx_ctx_inactive);
@@ -2700,8 +2734,8 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
mt = mf = NULL;
}
}
- if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
- (CSUM_L4_CALC|CSUM_L4_VALID)) {
+ if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC | CSUM_L4_VALID)) ==
+ (CSUM_L4_CALC | CSUM_L4_VALID)) {
if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
continue;
}
@@ -2712,7 +2746,6 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
DBG_COUNTER_INC(rx_if_input);
continue;
}
-
if (mf == NULL)
mf = m;
if (mt != NULL)
@@ -2723,7 +2756,6 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
ifp->if_input(ifp, mf);
DBG_COUNTER_INC(rx_if_input);
}
-
if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
@@ -2750,9 +2782,10 @@ txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
{
qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
qidx_t minthresh = txq->ift_size / 8;
- if (in_use > 4*minthresh)
+
+ if (in_use > 4 * minthresh)
return (notify_count);
- if (in_use > 2*minthresh)
+ if (in_use > 2 * minthresh)
return (notify_count >> 1);
if (in_use > minthresh)
return (notify_count >> 3);
@@ -2764,9 +2797,10 @@ txq_max_rs_deferred(iflib_txq_t txq)
{
qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
qidx_t minthresh = txq->ift_size / 8;
- if (txq->ift_in_use > 4*minthresh)
+
+ if (txq->ift_in_use > 4 * minthresh)
return (notify_count);
- if (txq->ift_in_use > 2*minthresh)
+ if (txq->ift_in_use > 2 * minthresh)
return (notify_count >> 1);
if (txq->ift_in_use > minthresh)
return (notify_count >> 2);
@@ -2792,8 +2826,7 @@ txq_max_rs_deferred(iflib_txq_t txq)
#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
static inline bool
-iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
-{
+iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use){
qidx_t dbval, max;
bool rang;
@@ -2813,11 +2846,11 @@ static void
print_pkt(if_pkt_info_t pi)
{
printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
- pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
+ pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
- pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
+ pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
- pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
+ pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
}
#endif
@@ -2841,7 +2874,6 @@ iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
n = *mp = m;
}
}
-
/*
* Determine where frame payload starts.
* Jump over vlan headers if already present,
@@ -2864,115 +2896,115 @@ iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
switch (pi->ipi_etype) {
#ifdef INET
case ETHERTYPE_IP:
- {
- struct ip *ip = NULL;
- struct tcphdr *th = NULL;
- int minthlen;
+ {
+ struct ip *ip = NULL;
+ struct tcphdr *th = NULL;
+ int minthlen;
- minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
- if (__predict_false(m->m_len < minthlen)) {
- /*
- * if this code bloat is causing too much of a hit
- * move it to a separate function and mark it noinline
- */
- if (m->m_len == pi->ipi_ehdrlen) {
- n = m->m_next;
- MPASS(n);
- if (n->m_len >= sizeof(*ip)) {
- ip = (struct ip *)n->m_data;
- if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
- th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+ minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
+ if (__predict_false(m->m_len < minthlen)) {
+ /*
+ * if this code bloat is causing too much of a hit
+ * move it to a separate function and mark it noinline
+ */
+ if (m->m_len == pi->ipi_ehdrlen) {
+ n = m->m_next;
+ MPASS(n);
+ if (n->m_len >= sizeof(*ip)) {
+ ip = (struct ip *)n->m_data;
+ if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
+ th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+ } else {
+ txq->ift_pullups++;
+ if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
+ return (ENOMEM);
+ ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
+ }
} else {
txq->ift_pullups++;
if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
return (ENOMEM);
ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
+ if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
+ th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
}
} else {
- txq->ift_pullups++;
- if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
- return (ENOMEM);
ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
}
- } else {
- ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
- if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
- th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
- }
- pi->ipi_ip_hlen = ip->ip_hl << 2;
- pi->ipi_ipproto = ip->ip_p;
- pi->ipi_flags |= IPI_TX_IPV4;
+ pi->ipi_ip_hlen = ip->ip_hl << 2;
+ pi->ipi_ipproto = ip->ip_p;
+ pi->ipi_flags |= IPI_TX_IPV4;
- if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
- ip->ip_sum = 0;
+ if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
+ ip->ip_sum = 0;
- if (IS_TSO4(pi)) {
- if (pi->ipi_ipproto == IPPROTO_TCP) {
- if (__predict_false(th == NULL)) {
- txq->ift_pullups++;
- if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
- return (ENOMEM);
- th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
+ if (IS_TSO4(pi)) {
+ if (pi->ipi_ipproto == IPPROTO_TCP) {
+ if (__predict_false(th == NULL)) {
+ txq->ift_pullups++;
+ if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
+ return (ENOMEM);
+ th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
+ }
+ pi->ipi_tcp_hflags = th->th_flags;
+ pi->ipi_tcp_hlen = th->th_off << 2;
+ pi->ipi_tcp_seq = th->th_seq;
+ }
+ if (__predict_false(ip->ip_p != IPPROTO_TCP))
+ return (ENXIO);
+ th->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+ pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
+ if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
+ ip->ip_sum = 0;
+ ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
}
- pi->ipi_tcp_hflags = th->th_flags;
- pi->ipi_tcp_hlen = th->th_off << 2;
- pi->ipi_tcp_seq = th->th_seq;
- }
- if (__predict_false(ip->ip_p != IPPROTO_TCP))
- return (ENXIO);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
- if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
- ip->ip_sum = 0;
- ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
}
+ break;
}
- break;
- }
#endif
#ifdef INET6
case ETHERTYPE_IPV6:
- {
- struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
- struct tcphdr *th;
- pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
-
- if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
- if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
- return (ENOMEM);
- }
- th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
+ {
+ struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
+ struct tcphdr *th;
- /* XXX-BZ this will go badly in case of ext hdrs. */
- pi->ipi_ipproto = ip6->ip6_nxt;
- pi->ipi_flags |= IPI_TX_IPV6;
+ pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
- if (IS_TSO6(pi)) {
- if (pi->ipi_ipproto == IPPROTO_TCP) {
- if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
- if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
- return (ENOMEM);
+ if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
+ if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
+ return (ENOMEM);
+ }
+ th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
+
+ /* XXX-BZ this will go badly in case of ext hdrs. */
+ pi->ipi_ipproto = ip6->ip6_nxt;
+ pi->ipi_flags |= IPI_TX_IPV6;
+
+ if (IS_TSO6(pi)) {
+ if (pi->ipi_ipproto == IPPROTO_TCP) {
+ if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
+ if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
+ return (ENOMEM);
+ }
+ pi->ipi_tcp_hflags = th->th_flags;
+ pi->ipi_tcp_hlen = th->th_off << 2;
}
- pi->ipi_tcp_hflags = th->th_flags;
- pi->ipi_tcp_hlen = th->th_off << 2;
+ if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
+ return (ENXIO);
+ /*
+ * The corresponding flag is set by the stack in the IPv4
+ * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
+ * So, set it here because the rest of the flow requires it.
+ */
+ pi->ipi_csum_flags |= CSUM_TCP_IPV6;
+ th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
+ pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
}
-
- if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
- return (ENXIO);
- /*
- * The corresponding flag is set by the stack in the IPv4
- * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
- * So, set it here because the rest of the flow requires it.
- */
- pi->ipi_csum_flags |= CSUM_TCP_IPV6;
- th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
- pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
+ break;
}
- break;
- }
#endif
default:
pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
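
The hunk above only re-indents iflib_parse_header, but the logic it reflows is worth restating: locate the L3 header behind the Ethernet header, record its length and protocol, and, for TSO, find the TCP header, pulling up the mbuf chain when the headers are not contiguous. A minimal userspace sketch of the contiguous IPv4 case follows; parse_ipv4_headers and pkt_info are illustrative names, not iflib symbols, and the bounds checks stand in for the kernel's m_pullup() calls.

#include <sys/types.h>
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

struct pkt_info {
	uint16_t ip_hlen;	/* IP header length, bytes */
	uint16_t tcp_hlen;	/* TCP header length, bytes */
	uint8_t  ipproto;	/* L4 protocol number */
};

/* Returns 0 on success, -1 if the buffer cannot hold the headers. */
static int
parse_ipv4_headers(const uint8_t *buf, size_t len, size_t ehdrlen,
    struct pkt_info *pi)
{
	const struct ip *ip;
	const struct tcphdr *th;

	if (len < ehdrlen + sizeof(*ip))
		return (-1);		/* kernel code m_pullup()s instead */
	ip = (const struct ip *)(buf + ehdrlen);
	pi->ip_hlen = ip->ip_hl << 2;	/* header length is in 32-bit words */
	if (pi->ip_hlen < sizeof(*ip))
		return (-1);		/* malformed header */
	pi->ipproto = ip->ip_p;
	if (pi->ipproto != IPPROTO_TCP)
		return (0);
	if (len < ehdrlen + pi->ip_hlen + sizeof(*th))
		return (-1);
	th = (const struct tcphdr *)((const uint8_t *)ip + pi->ip_hlen);
	pi->tcp_hlen = th->th_off << 2;	/* data offset is in 32-bit words */
	return (0);
}
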
@@ -2984,7 +3016,7 @@ iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
return (0);
}
-static __noinline struct mbuf *
+static __noinline struct mbuf *
collapse_pkthdr(struct mbuf *m0)
{
struct mbuf *m, *m_next, *tmp;
@@ -3032,7 +3064,7 @@ iflib_remove_mbuf(iflib_txq_t txq)
i = 1;
while (m) {
- ifsd_m[(pidx + i) & (ntxd -1)] = NULL;
+ ifsd_m[(pidx + i) & (ntxd - 1)] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
#endif
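
Several of the spacing fixes in this file touch the `(idx + n) & (ntxd - 1)` idiom. The mask is only equivalent to a modulo when the ring size is a power of two, which is why the register paths later in this diff check powerof2() on the descriptor counts. An illustrative helper:

#include <assert.h>
#include <stdint.h>

static inline uint32_t
ring_next(uint32_t idx, uint32_t incr, uint32_t size)
{
	/* the mask only equals a modulo when size is a power of two */
	assert(size != 0 && (size & (size - 1)) == 0);
	return ((idx + incr) & (size - 1));	/* same as (idx + incr) % size */
}
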
@@ -3044,12 +3076,12 @@ iflib_remove_mbuf(iflib_txq_t txq)
static int
iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
- struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
- int max_segs, int flags)
+ struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
+ int max_segs, int flags)
{
if_ctx_t ctx;
- if_shared_ctx_t sctx;
- if_softc_ctx_t scctx;
+ if_shared_ctx_t sctx;
+ if_softc_ctx_t scctx;
int i, next, pidx, err, ntxd, count;
struct mbuf *m, *tmp, **ifsd_m;
@@ -3071,7 +3103,7 @@ iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
err = bus_dmamap_load_mbuf_sg(tag, map,
- *m0, segs, nsegs, BUS_DMA_NOWAIT);
+ *m0, segs, nsegs, BUS_DMA_NOWAIT);
if (err)
return (err);
ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
@@ -3096,7 +3128,7 @@ iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
m = *m0;
count = 0;
do {
- next = (pidx + count) & (ntxd-1);
+ next = (pidx + count) & (ntxd - 1);
MPASS(ifsd_m[next] == NULL);
ifsd_m[next] = m;
count++;
@@ -3129,7 +3161,7 @@ iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
* see if we can't be smarter about physically
* contiguous mappings
*/
- next = (pidx + count) & (ntxd-1);
+ next = (pidx + count) & (ntxd - 1);
MPASS(ifsd_m[next] == NULL);
#if MEMORY_LOGGING
txq->ift_enqueued++;
@@ -3173,8 +3205,8 @@ calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
if (__predict_false(size == 0))
return (start);
- cur = start + size*cidx;
- end = start + size*ntxd;
+ cur = start + size * cidx;
+ end = start + size * ntxd;
next = CACHE_PTR_NEXT(cur);
return (next < end ? next : start);
}
@@ -3190,7 +3222,7 @@ iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
* 18 is enough bytes to pad an ARP packet to 46 bytes, and
* an ARP message is the smallest common payload I can think of
*/
- static char pad[18]; /* just zeros */
+ static char pad[18]; /* just zeros */
int n;
struct mbuf *new_head;
@@ -3205,9 +3237,8 @@ iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
m_freem(*m_head);
*m_head = new_head;
}
-
for (n = min_frame_size - (*m_head)->m_pkthdr.len;
- n > 0; n -= sizeof(pad))
+ n > 0; n -= sizeof(pad))
if (!m_append(*m_head, min(n, sizeof(pad)), pad))
break;
@@ -3217,21 +3248,20 @@ iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
DBG_COUNTER_INC(encap_pad_mbuf_fail);
return (ENOBUFS);
}
-
return 0;
}
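
iflib_ether_pad above appends a static block of zeros, sizeof(pad) bytes at a time, until the frame reaches the MAC's minimum size. A simplified userspace sketch of the same loop over a flat buffer; ether_pad here is illustrative, not the kernel function, and the capacity check stands in for m_append() failing.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t
ether_pad(uint8_t *frame, size_t len, size_t cap, size_t min_frame_size)
{
	static const uint8_t pad[18];	/* just zeros, like the kernel copy */
	size_t n;

	while (len < min_frame_size) {
		n = min_frame_size - len;
		if (n > sizeof(pad))
			n = sizeof(pad);
		if (n > cap - len)	/* no room: mirrors m_append failing */
			break;
		memcpy(frame + len, pad, n);
		len += n;
	}
	return (len);
}
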
static int
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
{
- if_ctx_t ctx;
- if_shared_ctx_t sctx;
- if_softc_ctx_t scctx;
- bus_dma_segment_t *segs;
- struct mbuf *m_head;
- void *next_txd;
- bus_dmamap_t map;
- struct if_pkt_info pi;
+ if_ctx_t ctx;
+ if_shared_ctx_t sctx;
+ if_softc_ctx_t scctx;
+ bus_dma_segment_t *segs;
+ struct mbuf *m_head;
+ void *next_txd;
+ bus_dmamap_t map;
+ struct if_pkt_info pi;
int remap = 0;
int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
bus_dma_tag_t desc_tag;
@@ -3251,17 +3281,16 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
cidx = txq->ift_cidx;
pidx = txq->ift_pidx;
if (ctx->ifc_flags & IFC_PREFETCH) {
- next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
+ next = (cidx + CACHE_PTR_INCREMENT) & (ntxd - 1);
if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
next_txd = calc_next_txd(txq, cidx, 0);
prefetch(next_txd);
}
-
/* prefetch the next cache line of mbuf pointers and flags */
prefetch(&txq->ift_sds.ifsd_m[next]);
if (txq->ift_sds.ifsd_map != NULL) {
prefetch(&txq->ift_sds.ifsd_map[next]);
- next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
+ next = (cidx + CACHE_LINE_SIZE) & (ntxd - 1);
prefetch(&txq->ift_sds.ifsd_flags[next]);
}
} else if (txq->ift_sds.ifsd_map != NULL)
@@ -3283,7 +3312,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
m_head = *m_headp;
pkt_info_zero(&pi);
- pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
+ pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG | M_BCAST | M_MCAST));
pi.ipi_pidx = pidx;
pi.ipi_qsidx = txq->ift_id;
pi.ipi_len = m_head->m_pkthdr.len;
@@ -3296,7 +3325,6 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
return (err);
m_head = *m_headp;
}
-
retry:
err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT);
defrag:
@@ -3333,7 +3361,6 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
DBG_COUNTER_INC(encap_load_mbuf_fail);
return (err);
}
-
/*
* XXX assumes a 1 to 1 relationship between segments and
* descriptors - this does not hold true on all drivers, e.g.
@@ -3356,11 +3383,10 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
*/
txq->ift_rs_pending += nsegs + 1;
if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
- iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
+ iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
pi.ipi_flags |= IPI_TX_INTR;
txq->ift_rs_pending = 0;
}
-
pi.ipi_segs = segs;
pi.ipi_nsegs = nsegs;
@@ -3373,7 +3399,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
if (map != NULL)
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
DBG_COUNTER_INC(tx_encap);
MPASS(pi.ipi_new_pidx < txq->ift_size);
@@ -3383,7 +3409,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
txq->ift_gen = 1;
}
/*
- * drivers can need as many as
+ * drivers can need as many as
* two sentinels
*/
MPASS(ndesc <= pi.ipi_nsegs + 2);
@@ -3433,7 +3459,7 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
cidx = txq->ift_cidx;
gen = txq->ift_gen;
qsize = txq->ift_size;
- mask = qsize-1;
+ mask = qsize - 1;
hasmap = txq->ift_sds.ifsd_map != NULL;
ifsd_flags = txq->ift_sds.ifsd_flags;
ifsd_m = txq->ift_sds.ifsd_m;
@@ -3457,10 +3483,15 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED;
}
if ((m = ifsd_m[cidx]) != NULL) {
- /* XXX we don't support any drivers that batch packets yet */
+ /*
+ * XXX we don't support any drivers that
+ * batch packets yet
+ */
MPASS(m->m_nextpkt == NULL);
- /* if the number of clusters exceeds the number of segments
- * there won't be space on the ring to save a pointer to each
+ /*
+ * if the number of clusters exceeds the
+ * number of segments there won't be space
+ * on the ring to save a pointer to each
* cluster so we simply free the list here
*/
if (m->m_flags & M_TOOBIG) {
@@ -3491,7 +3522,7 @@ iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
if_ctx_t ctx = txq->ift_ctx;
KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
- MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
+ MPASS(thresh /* + MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
/*
* Need a rate-limiting check so that this isn't called every time
@@ -3499,12 +3530,12 @@ iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
iflib_tx_credits_update(ctx, txq);
reclaim = DESC_RECLAIMABLE(txq);
- if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
+ if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */ ) {
#ifdef INVARIANTS
if (iflib_verbose_debug) {
printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
- txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
- reclaim, thresh);
+ txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
+ reclaim, thresh);
}
#endif
@@ -3524,17 +3555,17 @@ _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
struct mbuf **items;
size = r->size;
- next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
+ next = (cidx + CACHE_PTR_INCREMENT) & (size - 1);
items = __DEVOLATILE(struct mbuf **, &r->items[0]);
- prefetch(items[(cidx + offset) & (size-1)]);
+ prefetch(items[(cidx + offset) & (size - 1)]);
if (remaining > 1) {
prefetch2cachelines(&items[next]);
- prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
- prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
- prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
+ prefetch2cachelines(items[(cidx + offset + 1) & (size - 1)]);
+ prefetch2cachelines(items[(cidx + offset + 2) & (size - 1)]);
+ prefetch2cachelines(items[(cidx + offset + 3) & (size - 1)]);
}
- return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
+ return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size - 1)]));
}
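
_ring_peek_one above warms the cache lines the drain loop is about to read. A sketch of the same look-ahead using the portable __builtin_prefetch in place of the kernel's prefetch()/prefetch2cachelines(); ring_peek and struct item are illustrative stand-ins.

#include <stdint.h>

struct item;				/* stand-in for struct mbuf */

static inline struct item **
ring_peek(struct item **items, uint32_t cidx, uint32_t offset,
    uint32_t size, int remaining)
{
	uint32_t next = (cidx + offset) & (size - 1);

	__builtin_prefetch(&items[next]);
	if (remaining > 1) {
		/* warm the slots the drain loop will read next */
		__builtin_prefetch(&items[(next + 1) & (size - 1)]);
		__builtin_prefetch(&items[(next + 2) & (size - 1)]);
	}
	return (&items[next]);
}
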
static void
@@ -3551,7 +3582,7 @@ iflib_txq_can_drain(struct ifmp_ring *r)
if_ctx_t ctx = txq->ift_ctx;
return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
- ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
+ ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
}
static uint32_t
@@ -3566,7 +3597,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
bool do_prefetch, ring, rang;
if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
- !LINK_ACTIVE(ctx))) {
+ !LINK_ACTIVE(ctx))) {
DBG_COUNTER_INC(txq_drain_notready);
return (0);
}
@@ -3576,12 +3607,11 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
DBG_COUNTER_INC(txq_drain_flushing);
for (i = 0; i < avail; i++) {
- m_free(r->items[(cidx + i) & (r->size-1)]);
- r->items[(cidx + i) & (r->size-1)] = NULL;
+ m_free(r->items[(cidx + i) & (r->size - 1)]);
+ r->items[(cidx + i) & (r->size - 1)] = NULL;
}
return (avail);
}
-
if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
CALLOUT_LOCK(txq);
@@ -3597,7 +3627,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
#ifdef INVARIANTS
if (iflib_verbose_debug)
printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
- avail, ctx->ifc_flags, TXQ_AVAIL(txq));
+ avail, ctx->ifc_flags, TXQ_AVAIL(txq));
#endif
do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
avail = TXQ_AVAIL(txq);
@@ -3641,7 +3671,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
}
/* deliberate use of bitwise or to avoid gratuitous short-circuit */
- ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
+ ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
@@ -3720,14 +3750,14 @@ _task_fn_tx(void *context)
if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
netmap_tx_irq(ifp, txq->ift_id);
else {
-#ifdef DEV_NETMAP
+#ifdef DEV_NETMAP
if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
struct netmap_kring *kring = NA(ctx->ifc_ifp)->tx_rings[txq->ift_id];
if (kring->nr_hwtail != nm_prev(kring->rhead, kring->nkr_num_slots - 1))
GROUPTASK_ENQUEUE(&txq->ift_task);
}
-#endif
+#endif
}
IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
return;
@@ -3741,8 +3771,9 @@ _task_fn_tx(void *context)
#ifdef INVARIANTS
int rc =
#endif
- IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
- KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
+ IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
+
+ KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but it is not implemented in the driver"));
}
}
@@ -3764,6 +3795,7 @@ _task_fn_rx(void *context)
#ifdef DEV_NETMAP
if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
u_int work = 0;
+
if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
more = false;
}
@@ -3771,7 +3803,7 @@ _task_fn_rx(void *context)
#endif
budget = ctx->ifc_sysctl_rx_budget;
if (budget == 0)
- budget = 16; /* XXX */
+ budget = 16; /* XXX */
if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
if (ctx->ifc_flags & IFC_LEGACY)
IFDI_INTR_ENABLE(ctx);
@@ -3779,7 +3811,8 @@ _task_fn_rx(void *context)
#ifdef INVARIANTS
int rc =
#endif
- IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
+ IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
+
KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
DBG_COUNTER_INC(rx_intr_enables);
}
@@ -3804,7 +3837,7 @@ _task_fn_admin(void *context)
oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
do_reset = (ctx->ifc_flags & IFC_DO_RESET);
do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
- ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
+ ctx->ifc_flags &= ~(IFC_DO_RESET | IFC_DO_WATCHDOG);
STATE_UNLOCK(ctx);
if ((!running & !oactive) &&
@@ -3823,7 +3856,7 @@ _task_fn_admin(void *context)
}
IFDI_UPDATE_ADMIN_STATUS(ctx);
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
- callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
+ callout_reset_on(&txq->ift_timer, hz / 2, iflib_timer, txq, txq->ift_timer.c_cpu);
IFDI_LINK_INTR_ENABLE(ctx);
if (do_reset)
iflib_if_init_locked(ctx);
@@ -3893,7 +3926,7 @@ iflib_if_init(void *arg)
static int
iflib_if_transmit(if_t ifp, struct mbuf *m)
{
- if_ctx_t ctx = if_getsoftc(ifp);
+ if_ctx_t ctx = if_getsoftc(ifp);
iflib_txq_t txq;
int err, qidx;
@@ -3903,7 +3936,6 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
m_freem(m);
return (ENOBUFS);
}
-
MPASS(m->m_nextpkt == NULL);
qidx = 0;
if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
@@ -3934,7 +3966,7 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
} while (next != NULL);
if (count > nitems(marr))
- if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
+ if ((mp = malloc(count * sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
/* XXX check nextpkt */
m_freem(m);
/* XXX simplify for now */
@@ -3959,7 +3991,6 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
m_freem(m);
}
-
return (err);
}
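
iflib_if_transmit selects a TX queue from the flow hash when more than one queue set exists. The real code goes through the QIDX() macro; a hedged sketch of the reduction, where pick_txq is illustrative and the modulo is an assumption about how the hash is folded onto the queue count.

#include <stdint.h>

static inline uint16_t
pick_txq(uint32_t flowid, uint16_t ntxqsets)
{
	/* queue 0 is the fallback when only one set exists or no hash is set */
	if (ntxqsets <= 1)
		return (0);
	return ((uint16_t)(flowid % ntxqsets));
}
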
@@ -3992,12 +4023,12 @@ static int
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
{
if_ctx_t ctx = if_getsoftc(ifp);
- struct ifreq *ifr = (struct ifreq *)data;
+ struct ifreq *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
+ struct ifaddr *ifa = (struct ifaddr *)data;
#endif
- bool avoid_reset = FALSE;
- int err = 0, reinit = 0, bits;
+ bool avoid_reset = FALSE;
+ int err = 0, reinit = 0, bits;
switch (command) {
case SIOCSIFADDR:
@@ -4014,8 +4045,8 @@ iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
** so we avoid doing it when possible.
*/
if (avoid_reset) {
- if_setflagbits(ifp, IFF_UP,0);
- if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING))
+ if_setflagbits(ifp, IFF_UP, 0);
+ if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
reinit = 1;
#ifdef INET
if (!(if_getflags(ifp) & IFF_NOARP))
@@ -4085,63 +4116,62 @@ iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
break;
case SIOCGI2C:
- {
- struct ifi2creq i2c;
+ {
+ struct ifi2creq i2c;
- err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
- if (err != 0)
- break;
- if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
- err = EINVAL;
- break;
- }
- if (i2c.len > sizeof(i2c.data)) {
- err = EINVAL;
+ err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
+ if (err != 0)
+ break;
+ if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
+ err = EINVAL;
+ break;
+ }
+ if (i2c.len > sizeof(i2c.data)) {
+ err = EINVAL;
+ break;
+ }
+ if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
+ err = copyout(&i2c, ifr_data_get_ptr(ifr),
+ sizeof(i2c));
break;
}
-
- if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
- err = copyout(&i2c, ifr_data_get_ptr(ifr),
- sizeof(i2c));
- break;
- }
case SIOCSIFCAP:
- {
- int mask, setmask;
+ {
+ int mask, setmask;
- mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
- setmask = 0;
+ mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
+ setmask = 0;
#ifdef TCP_OFFLOAD
- setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
+ setmask |= mask & (IFCAP_TOE4 | IFCAP_TOE6);
#endif
- setmask |= (mask & IFCAP_FLAGS);
-
- if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
- setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
- if ((mask & IFCAP_WOL) &&
- (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
- setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
- if_vlancap(ifp);
- /*
- * want to ensure that traffic has stopped before we change any of the flags
- */
- if (setmask) {
- CTX_LOCK(ctx);
- bits = if_getdrvflags(ifp);
- if (bits & IFF_DRV_RUNNING)
- iflib_stop(ctx);
- STATE_LOCK(ctx);
- if_togglecapenable(ifp, setmask);
- STATE_UNLOCK(ctx);
- if (bits & IFF_DRV_RUNNING)
- iflib_init_locked(ctx);
- STATE_LOCK(ctx);
- if_setdrvflags(ifp, bits);
- STATE_UNLOCK(ctx);
- CTX_UNLOCK(ctx);
+ setmask |= (mask & IFCAP_FLAGS);
+
+ if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
+ setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
+ if ((mask & IFCAP_WOL) &&
+ (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
+ setmask |= (mask & (IFCAP_WOL_MCAST | IFCAP_WOL_MAGIC));
+ if_vlancap(ifp);
+ /*
+ * want to ensure that traffic has stopped before we change any of the flags
+ */
+ if (setmask) {
+ CTX_LOCK(ctx);
+ bits = if_getdrvflags(ifp);
+ if (bits & IFF_DRV_RUNNING)
+ iflib_stop(ctx);
+ STATE_LOCK(ctx);
+ if_togglecapenable(ifp, setmask);
+ STATE_UNLOCK(ctx);
+ if (bits & IFF_DRV_RUNNING)
+ iflib_init_locked(ctx);
+ STATE_LOCK(ctx);
+ if_setdrvflags(ifp, bits);
+ STATE_UNLOCK(ctx);
+ CTX_UNLOCK(ctx);
+ }
+ break;
}
- break;
- }
case SIOCGPRIVATE_0:
case SIOCSDRVSPEC:
case SIOCGDRVSPEC:
@@ -4231,9 +4261,9 @@ iflib_device_probe(device_t dev)
{
pci_vendor_info_t *ent;
- uint16_t pci_vendor_id, pci_device_id;
- uint16_t pci_subvendor_id, pci_subdevice_id;
- uint16_t pci_rev_id;
+ uint16_t pci_vendor_id, pci_device_id;
+ uint16_t pci_subvendor_id, pci_subdevice_id;
+ uint16_t pci_rev_id;
if_shared_ctx_t sctx;
if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
@@ -4255,17 +4285,18 @@ iflib_device_probe(device_t dev)
}
if ((pci_device_id == ent->pvi_device_id) &&
((pci_subvendor_id == ent->pvi_subvendor_id) ||
- (ent->pvi_subvendor_id == 0)) &&
+ (ent->pvi_subvendor_id == 0)) &&
((pci_subdevice_id == ent->pvi_subdevice_id) ||
- (ent->pvi_subdevice_id == 0)) &&
+ (ent->pvi_subdevice_id == 0)) &&
((pci_rev_id == ent->pvi_rev_id) ||
- (ent->pvi_rev_id == 0))) {
+ (ent->pvi_rev_id == 0))) {
device_set_desc_copy(dev, ent->pvi_name);
- /* this needs to be changed to zero if the bus probing code
- * ever stops re-probing on best match because the sctx
- * may have its values over written by register calls
- * in subsequent probes
+ /*
+ * this needs to be changed to zero if the bus
+ * probing code ever stops re-probing on best match
+ * because the sctx may have its values overwritten
+ * by register calls in subsequent probes
*/
return (BUS_PROBE_DEFAULT);
}
@@ -4309,12 +4340,12 @@ iflib_reset_qvalues(if_ctx_t ctx)
for (i = 0; i < sctx->isc_nrxqs; i++) {
if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
- i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
+ i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
}
if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
- i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
+ i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
}
}
@@ -4322,12 +4353,12 @@ iflib_reset_qvalues(if_ctx_t ctx)
for (i = 0; i < sctx->isc_ntxqs; i++) {
if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
- i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
+ i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
}
if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
- i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
+ i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
}
}
@@ -4345,14 +4376,13 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
uint16_t main_rxq;
- ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
+ ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK | M_ZERO);
if (sc == NULL) {
- sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
+ sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK | M_ZERO);
device_set_softc(dev, ctx);
ctx->ifc_flags |= IFC_SC_ALLOCATED;
}
-
ctx->ifc_sctx = sctx;
ctx->ifc_dev = dev;
ctx->ifc_softc = sc;
@@ -4406,7 +4436,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
/* XXX change for per-queue sizes */
device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
- scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
+ scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
for (i = 0; i < sctx->isc_nrxqs; i++) {
if (!powerof2(scctx->isc_nrxd[i])) {
/* round down instead? */
@@ -4439,13 +4469,16 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX)
scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX;
- /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
+ /*
+ * TSO parameters - dig these out of the data sheet - simply
+ * correspond to tag setup
+ */
ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max;
ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max;
ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
if (scctx->isc_rss_table_size == 0)
scctx->isc_rss_table_size = 64;
- scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
+ scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
/* XXX format name */
@@ -4466,10 +4499,10 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
msix = scctx->isc_vectors;
} else if (scctx->isc_msix_bar != 0)
- /*
- * The simple fact that isc_msix_bar is not 0 does not mean we
- * we have a good value there that is known to work.
- */
+ /*
+ * The simple fact that isc_msix_bar is not 0 does not mean
+ * we have a good value there that is known to work.
+ */
msix = iflib_msix_init(ctx);
else {
scctx->isc_vectors = 1;
@@ -4483,7 +4516,6 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
device_printf(dev, "Unable to allocate queue memory\n");
goto fail;
}
-
if ((err = iflib_qset_structures_setup(ctx)))
goto fail_queues;
@@ -4547,7 +4579,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
int
iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
- struct iflib_cloneattach_ctx *clctx)
+ struct iflib_cloneattach_ctx *clctx)
{
int err;
if_ctx_t ctx;
@@ -4558,10 +4590,10 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
uint16_t main_txq;
uint16_t main_rxq;
- ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
- sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
+ ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK | M_ZERO);
+ sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK | M_ZERO);
ctx->ifc_flags |= IFC_SC_ALLOCATED;
- if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
+ if (sctx->isc_flags & (IFLIB_PSEUDO | IFLIB_VIRTUAL))
ctx->ifc_flags |= IFC_PSEUDO;
ctx->ifc_sctx = sctx;
@@ -4591,7 +4623,7 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
if (sctx->isc_flags & IFLIB_GEN_MAC)
iflib_gen_mac(ctx);
if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
- clctx->cc_params)) != 0) {
+ clctx->cc_params)) != 0) {
device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
return (err);
}
@@ -4636,7 +4668,7 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
/* XXX change for per-queue sizes */
device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
- scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
+ scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
for (i = 0; i < sctx->isc_nrxqs; i++) {
if (!powerof2(scctx->isc_nrxd[i])) {
/* round down instead? */
@@ -4669,13 +4701,16 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX)
scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX;
- /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
+ /*
+ * TSO parameters - dig these out of the data sheet - simply
+ * correspond to tag setup
+ */
ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max;
ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max;
ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
if (scctx->isc_rss_table_size == 0)
scctx->isc_rss_table_size = 64;
- scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
+ scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
/* XXX format name */
@@ -4689,7 +4724,6 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
device_printf(dev, "Unable to allocate queue memory\n");
goto fail;
}
-
if ((err = iflib_qset_structures_setup(ctx))) {
device_printf(dev, "qset structure setup failed %d\n", err);
goto fail_queues;
@@ -4739,7 +4773,7 @@ iflib_pseudo_deregister(if_ctx_t ctx)
EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
ether_ifdetach(ifp);
- /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/
+ /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
CTX_LOCK_DESTROY(ctx);
/* XXX drain any dependent tasks */
tqg = qgroup_if_io_tqg;
@@ -4798,10 +4832,9 @@ iflib_device_deregister(if_ctx_t ctx)
/* Make sure VLANS are not using driver */
if (if_vlantrunkinuse(ifp)) {
- device_printf(dev,"Vlan in use, detach first\n");
+ device_printf(dev, "Vlan in use, detach first\n");
return (EBUSY);
}
-
CTX_LOCK(ctx);
ctx->ifc_in_detach = 1;
iflib_stop(ctx);
@@ -4830,7 +4863,7 @@ iflib_device_deregister(if_ctx_t ctx)
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
free(fl->ifl_rx_bitmap, M_IFLIB);
-
+
}
tqg = qgroup_if_config_tqg;
if (ctx->ifc_admin_task.gt_uniq != NULL)
@@ -4841,7 +4874,7 @@ iflib_device_deregister(if_ctx_t ctx)
IFDI_DETACH(ctx);
CTX_UNLOCK(ctx);
- /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/
+ /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
CTX_LOCK_DESTROY(ctx);
device_set_softc(ctx->ifc_dev, NULL);
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
@@ -4852,10 +4885,9 @@ iflib_device_deregister(if_ctx_t ctx)
}
if (ctx->ifc_msix_mem != NULL) {
bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
- ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
+ ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
ctx->ifc_msix_mem = NULL;
}
-
bus_generic_detach(dev);
if_free(ifp);
@@ -5043,12 +5075,11 @@ iflib_register(if_ctx_t ctx)
device_printf(dev, "can not allocate ifnet structure\n");
return (ENOMEM);
}
-
/*
* Initialize our context's device specific methods
*/
- kobj_init((kobj_t) ctx, (kobj_class_t) driver);
- kobj_class_compile((kobj_class_t) driver);
+ kobj_init((kobj_t)ctx, (kobj_class_t)driver);
+ kobj_class_compile((kobj_class_t)driver);
driver->refs++;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
@@ -5061,14 +5092,14 @@ iflib_register(if_ctx_t ctx)
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
ctx->ifc_vlan_attach_event =
- EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
- EVENTHANDLER_PRI_FIRST);
+ EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
+ EVENTHANDLER_PRI_FIRST);
ctx->ifc_vlan_detach_event =
- EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
- EVENTHANDLER_PRI_FIRST);
+ EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
+ EVENTHANDLER_PRI_FIRST);
ifmedia_init(&ctx->ifc_media, IFM_IMASK,
- iflib_media_change, iflib_media_status);
+ iflib_media_change, iflib_media_status);
return (0);
}
@@ -5100,22 +5131,20 @@ iflib_queues_alloc(if_ctx_t ctx)
/* Allocate the TX ring struct memory */
if (!(ctx->ifc_txqs =
- (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
+ (iflib_txq_t)malloc(sizeof(struct iflib_txq) *
ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate TX ring memory\n");
err = ENOMEM;
goto fail;
}
-
/* Now allocate the RX */
if (!(ctx->ifc_rxqs =
- (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
+ (iflib_rxq_t)malloc(sizeof(struct iflib_rxq) *
nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate RX ring memory\n");
err = ENOMEM;
goto rx_fail;
}
-
txq = ctx->ifc_txqs;
rxq = ctx->ifc_rxqs;
@@ -5125,7 +5154,7 @@ iflib_queues_alloc(if_ctx_t ctx)
for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
/* Set up some basics */
- if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
+ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK | M_ZERO)) == NULL) {
device_printf(dev, "failed to allocate iflib_dma_info\n");
err = ENOMEM;
goto err_tx_desc;
@@ -5155,7 +5184,6 @@ iflib_queues_alloc(if_ctx_t ctx)
err = ENOMEM;
goto err_tx_desc;
}
-
/* Initialize the TX lock */
snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
device_get_nameunit(dev), txq->ift_id);
@@ -5163,10 +5191,10 @@ iflib_queues_alloc(if_ctx_t ctx)
callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
- device_get_nameunit(dev), txq->ift_id);
+ device_get_nameunit(dev), txq->ift_id);
err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
- iflib_txq_can_drain, M_IFLIB, M_WAITOK);
+ iflib_txq_can_drain, M_IFLIB, M_WAITOK);
if (err) {
/* XXX free any allocated rings */
device_printf(dev, "Unable to allocate buf_ring\n");
@@ -5177,12 +5205,11 @@ iflib_queues_alloc(if_ctx_t ctx)
for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
/* Set up some basics */
- if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
+ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK | M_ZERO)) == NULL) {
device_printf(dev, "failed to allocate iflib_dma_info\n");
err = ENOMEM;
goto err_tx_desc;
}
-
rxq->ifr_ifdi = ifdip;
/* XXX this needs to be changed if #rx queues != #tx queues */
rxq->ifr_ntxqirq = 1;
@@ -5204,7 +5231,7 @@ iflib_queues_alloc(if_ctx_t ctx)
}
rxq->ifr_nfl = nfree_lists;
if (!(fl =
- (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (iflib_fl_t)malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate free list memory\n");
err = ENOMEM;
goto err_tx_desc;
@@ -5216,27 +5243,26 @@ iflib_queues_alloc(if_ctx_t ctx)
fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
}
- /* Allocate receive buffers for the ring*/
+ /* Allocate receive buffers for the ring */
if (iflib_rxsd_alloc(rxq)) {
device_printf(dev,
"Critical Failure setting up receive buffers\n");
err = ENOMEM;
goto err_rx_desc;
}
-
- for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
- fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO);
+ for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
+ fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK | M_ZERO);
}
/* TXQs */
- vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
- paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
+ vaddrs = malloc(sizeof(caddr_t) * ntxqsets * ntxqs, M_IFLIB, M_WAITOK);
+ paddrs = malloc(sizeof(uint64_t) * ntxqsets * ntxqs, M_IFLIB, M_WAITOK);
for (i = 0; i < ntxqsets; i++) {
iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
for (j = 0; j < ntxqs; j++, di++) {
- vaddrs[i*ntxqs + j] = di->idi_vaddr;
- paddrs[i*ntxqs + j] = di->idi_paddr;
+ vaddrs[i * ntxqs + j] = di->idi_vaddr;
+ paddrs[i * ntxqs + j] = di->idi_paddr;
}
}
if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
@@ -5250,14 +5276,14 @@ iflib_queues_alloc(if_ctx_t ctx)
free(paddrs, M_IFLIB);
/* RXQs */
- vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
- paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
+ vaddrs = malloc(sizeof(caddr_t) * nrxqsets * nrxqs, M_IFLIB, M_WAITOK);
+ paddrs = malloc(sizeof(uint64_t) * nrxqsets * nrxqs, M_IFLIB, M_WAITOK);
for (i = 0; i < nrxqsets; i++) {
iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
for (j = 0; j < nrxqs; j++, di++) {
- vaddrs[i*nrxqs + j] = di->idi_vaddr;
- paddrs[i*nrxqs + j] = di->idi_paddr;
+ vaddrs[i * nrxqs + j] = di->idi_vaddr;
+ paddrs[i * nrxqs + j] = di->idi_paddr;
}
}
if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
@@ -5386,7 +5412,6 @@ iflib_qset_structures_setup(if_ctx_t ctx)
device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
return (err);
}
-
if ((err = iflib_rx_structures_setup(ctx)) != 0)
device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
@@ -5395,7 +5420,7 @@ iflib_qset_structures_setup(if_ctx_t ctx)
int
iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
- driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name)
+ driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name)
{
return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
@@ -5415,15 +5440,15 @@ find_nth(if_ctx_t ctx, int qid)
for (i = 0; i < eqid; i++) {
cpuid = CPU_FFS(&cpus);
MPASS(cpuid != 0);
- CPU_CLR(cpuid-1, &cpus);
+ CPU_CLR(cpuid - 1, &cpus);
}
cpuid = CPU_FFS(&cpus);
MPASS(cpuid != 0);
- return (cpuid-1);
+ return (cpuid - 1);
}
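
find_nth above walks the CPU set by repeatedly taking and clearing the lowest set bit. A userspace analogue, assuming a 64-bit mask in place of cpuset_t and __builtin_ffsll in place of CPU_FFS()/CPU_CLR().

#include <stdint.h>

static int
find_nth_cpu(uint64_t mask, int n)
{
	int bit;

	while (n-- > 0) {
		bit = __builtin_ffsll(mask);	/* 1-based, 0 if empty */
		if (bit == 0)
			return (-1);
		mask &= ~(1ULL << (bit - 1));	/* CPU_CLR(bit - 1, &cpus) */
	}
	bit = __builtin_ffsll(mask);
	return (bit == 0 ? -1 : bit - 1);
}
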
#ifdef SCHED_ULE
-extern struct cpu_group *cpu_top; /* CPU topology */
+extern struct cpu_group *cpu_top; /* CPU topology */
static int
find_child_with_core(int cpu, struct cpu_group *grp)
@@ -5506,8 +5531,14 @@ get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
{
switch (type) {
case IFLIB_INTR_TX:
- /* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
- /* XXX handle multiple RX threads per core and more than two core per L2 group */
+ /*
+ * TX queues get cores which share at least an L2 cache with
+ * the corresponding RX queue
+ */
+ /*
+ * XXX handle multiple RX threads per core and more than two
+ * core per L2 group
+ */
return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
case IFLIB_INTR_RX:
case IFLIB_INTR_RXTX:
@@ -5549,8 +5580,8 @@ iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
int
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
- iflib_intr_type_t type, driver_filter_t *filter,
- void *filter_arg, int qid, char *name)
+ iflib_intr_type_t type, driver_filter_t *filter,
+ void *filter_arg, int qid, char *name)
{
struct grouptask *gtask;
struct taskqgroup *tqg;
@@ -5564,7 +5595,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
tqrid = rid;
switch (type) {
- /* XXX merge tx/rx for netmap? */
+ /* XXX merge tx/rx for netmap? */
case IFLIB_INTR_TX:
q = &ctx->ifc_txqs[qid];
info = &ctx->ifc_txqs[qid].ift_filter_info;
@@ -5611,7 +5642,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
info->ifi_task = gtask;
info->ifi_ctx = q;
- err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
+ err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
if (err != 0) {
device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
return (err);
@@ -5631,7 +5662,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
}
void
-iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, char *name)
+iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, char *name)
{
struct grouptask *gtask;
struct taskqgroup *tqg;
@@ -5671,8 +5702,7 @@ iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name);
if (err)
taskqgroup_attach(tqg, gtask, q, irq_num, name);
- }
- else {
+ } else {
taskqgroup_attach(tqg, gtask, q, irq_num, name);
}
}
@@ -5776,7 +5806,7 @@ iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
void
iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
- const char *name)
+ const char *name)
{
GROUPTASK_INIT(gtask, 0, fn, ctx);
@@ -5787,7 +5817,7 @@ void
iflib_config_gtask_deinit(struct grouptask *gtask)
{
- taskqgroup_detach(qgroup_if_config_tqg, gtask);
+ taskqgroup_detach(qgroup_if_config_tqg, gtask);
}
void
@@ -5844,15 +5874,15 @@ iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
- const char *description, if_int_delay_info_t info,
- int offset, int value)
+ const char *description, if_int_delay_info_t info,
+ int offset, int value)
{
info->iidi_ctx = ctx;
info->iidi_offset = offset;
info->iidi_value = value;
SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
- OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
+ OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW,
info, 0, iflib_sysctl_int_delay, "I", description);
}
@@ -5884,12 +5914,12 @@ iflib_msix_init(if_ctx_t ctx)
{
int i;
size_t len = sizeof(i);
+
err = kernel_sysctlbyname(curthread, "hw.pci.enable_msix", &i, &len, NULL, 0, NULL, 0);
if (err == 0) {
if (i == 0)
goto msi;
- }
- else {
+ } else {
device_printf(dev, "unable to read hw.pci.enable_msix.");
}
}
@@ -5908,7 +5938,7 @@ iflib_msix_init(if_ctx_t ctx)
{
int msix_ctrl, rid;
- pci_enable_busmaster(dev);
+ pci_enable_busmaster(dev);
rid = 0;
if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0 && rid != 0) {
rid += PCIR_MSIX_CTRL;
@@ -5917,7 +5947,7 @@ iflib_msix_init(if_ctx_t ctx)
pci_write_config(dev, rid, msix_ctrl, 2);
} else {
device_printf(dev, "PCIY_MSIX capability not found; "
- "or rid %d == 0.\n", rid);
+ "or rid %d == 0.\n", rid);
goto msi;
}
}
@@ -5931,7 +5961,7 @@ iflib_msix_init(if_ctx_t ctx)
*/
if (bar != -1) {
ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &bar, RF_ACTIVE);
+ SYS_RES_MEMORY, &bar, RF_ACTIVE);
if (ctx->ifc_msix_mem == NULL) {
/* May not be enabled */
device_printf(dev, "Unable to map MSIX table \n");
@@ -5939,7 +5969,8 @@ iflib_msix_init(if_ctx_t ctx)
}
}
/* First try MSI/X */
- if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */
+ if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix
+ * disabled */
device_printf(dev, "System has MSIX disabled \n");
bus_release_resource(dev, SYS_RES_MEMORY,
bar, ctx->ifc_msix_mem);
@@ -5959,7 +5990,7 @@ iflib_msix_init(if_ctx_t ctx)
#endif
queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
- CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
+ CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
if (queues > rss_getnumbuckets())
@@ -5988,18 +6019,17 @@ iflib_msix_init(if_ctx_t ctx)
#ifdef INVARIANTS
if (tx_queues != rx_queues)
device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
- min(rx_queues, tx_queues), min(rx_queues, tx_queues));
+ min(rx_queues, tx_queues), min(rx_queues, tx_queues));
#endif
tx_queues = min(rx_queues, tx_queues);
rx_queues = min(rx_queues, tx_queues);
}
-
device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues);
vectors = rx_queues + admincnt;
if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
device_printf(dev,
- "Using MSIX interrupts with %d vectors\n", vectors);
+ "Using MSIX interrupts with %d vectors\n", vectors);
scctx->isc_vectors = vectors;
scctx->isc_nrxqsets = rx_queues;
scctx->isc_ntxqsets = tx_queues;
@@ -6015,17 +6045,17 @@ iflib_msix_init(if_ctx_t ctx)
scctx->isc_ntxqsets = 1;
scctx->isc_vectors = vectors;
if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
- device_printf(dev,"Using an MSI interrupt\n");
+ device_printf(dev, "Using an MSI interrupt\n");
scctx->isc_intr = IFLIB_INTR_MSI;
} else {
- device_printf(dev,"Using a Legacy interrupt\n");
+ device_printf(dev, "Using a Legacy interrupt\n");
scctx->isc_intr = IFLIB_INTR_LEGACY;
}
return (vectors);
}
-char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
+char *ring_states[] = {"IDLE", "BUSY", "STALLED", "ABDICATED"};
static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
@@ -6048,10 +6078,10 @@ mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
ring_state = ring_states[state[3]];
sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
- state[0], state[1], state[2], ring_state);
+ state[0], state[1], state[2], ring_state);
rc = sbuf_finish(sb);
sbuf_delete(sb);
- return(rc);
+ return (rc);
}
enum iflib_ndesc_handler {
@@ -6072,7 +6102,7 @@ mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
nqs = 8;
- switch(type) {
+ switch (type) {
case IFLIB_NTXD_HANDLER:
ndesc = ctx->ifc_sysctl_ntxds;
if (ctx->ifc_sctx)
@@ -6084,12 +6114,12 @@ mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
nqs = ctx->ifc_sctx->isc_nrxqs;
break;
default:
- panic("unhandled type");
+ panic("unhandled type");
}
if (nqs == 0)
nqs = 8;
- for (i=0; i<8; i++) {
+ for (i = 0; i < 8; i++) {
if (i >= nqs)
break;
if (i)
@@ -6106,14 +6136,14 @@ mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
ndesc[i] = strtoul(p, NULL, 10);
}
- return(rc);
+ return (rc);
}
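
mp_ndesc_handler parses a comma-separated sysctl string into per-queue descriptor counts; the lines that advance the cursor past each comma are elided by the diff context. A sketch of one plausible walk, assuming a strsep(3)-style split; parse_ndescs is illustrative, and the elided kernel lines may advance the pointer differently.

#include <stdlib.h>
#include <string.h>

static int
parse_ndescs(char *buf, unsigned long *ndesc, int nqs)
{
	char *p;
	int i;

	for (i = 0; i < nqs && (p = strsep(&buf, ",")) != NULL; i++)
		ndesc[i] = strtoul(p, NULL, 10);
	return (i);	/* number of entries parsed */
}
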
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct sysctl_oid_list *child, *oid_list;
struct sysctl_ctx_list *ctx_list;
struct sysctl_oid *node;
@@ -6121,38 +6151,38 @@ iflib_add_device_sysctl_pre(if_ctx_t ctx)
ctx_list = device_get_sysctl_ctx(dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
- CTLFLAG_RD, NULL, "IFLIB fields");
+ CTLFLAG_RD, NULL, "IFLIB fields");
oid_list = SYSCTL_CHILDREN(node);
SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
- CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
- "driver version");
+ CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
+ "driver version");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
- "# of txqs to use, 0 => use default #");
+ CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
+ "# of txqs to use, 0 => use default #");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
- "# of rxqs to use, 0 => use default #");
+ CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
+ "# of rxqs to use, 0 => use default #");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
- "permit #txq != #rxq");
+ CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
+ "permit #txq != #rxq");
SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
- CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
- "disable MSIX (default 0)");
+ CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
+ "disable MSIX (default 0)");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
- "set the rx budget");
+ CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
+ "set the rx budget");
/* XXX change for per-queue sizes */
SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
- CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
- mp_ndesc_handler, "A",
- "list of # of tx descriptors to use, 0 = use default #");
+ CTLTYPE_STRING | CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
+ mp_ndesc_handler, "A",
+ "list of # of tx descriptors to use, 0 = use default #");
SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
- CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
- mp_ndesc_handler, "A",
- "list of # of rx descriptors to use, 0 = use default #");
+ CTLTYPE_STRING | CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
+ mp_ndesc_handler, "A",
+ "list of # of rx descriptors to use, 0 = use default #");
}
static void
@@ -6160,7 +6190,7 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
{
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct sysctl_oid_list *child;
struct sysctl_ctx_list *ctx_list;
iflib_fl_t fl;
@@ -6171,6 +6201,7 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
char *qfmt;
struct sysctl_oid *queue_node, *fl_node, *node;
struct sysctl_oid_list *queue_list, *fl_list;
+
ctx_list = device_get_sysctl_ctx(dev);
node = ctx->ifc_sysctl_node;
@@ -6185,76 +6216,76 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
snprintf(namebuf, NAME_BUFLEN, qfmt, i);
queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
- CTLFLAG_RD, NULL, "Queue Name");
+ CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
- CTLFLAG_RD,
- &txq->ift_dequeued, "total mbufs freed");
+ CTLFLAG_RD,
+ &txq->ift_dequeued, "total mbufs freed");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
- CTLFLAG_RD,
- &txq->ift_enqueued, "total mbufs enqueued");
+ CTLFLAG_RD,
+ &txq->ift_enqueued, "total mbufs enqueued");
#endif
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
- CTLFLAG_RD,
- &txq->ift_mbuf_defrag, "# of times m_defrag was called");
+ CTLFLAG_RD,
+ &txq->ift_mbuf_defrag, "# of times m_defrag was called");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
- CTLFLAG_RD,
- &txq->ift_pullups, "# of times m_pullup was called");
+ CTLFLAG_RD,
+ &txq->ift_pullups, "# of times m_pullup was called");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD,
- &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
+ CTLFLAG_RD,
+ &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
- CTLFLAG_RD,
- &txq->ift_no_desc_avail, "# of times no descriptors were available");
+ CTLFLAG_RD,
+ &txq->ift_no_desc_avail, "# of times no descriptors were available");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
- CTLFLAG_RD,
- &txq->ift_map_failed, "# of times dma map failed");
+ CTLFLAG_RD,
+ &txq->ift_map_failed, "# of times dma map failed");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
- CTLFLAG_RD,
- &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
+ CTLFLAG_RD,
+ &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
- CTLFLAG_RD,
- &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
+ CTLFLAG_RD,
+ &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
- CTLFLAG_RD,
- &txq->ift_pidx, 1, "Producer Index");
+ CTLFLAG_RD,
+ &txq->ift_pidx, 1, "Producer Index");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
- CTLFLAG_RD,
- &txq->ift_cidx, 1, "Consumer Index");
+ CTLFLAG_RD,
+ &txq->ift_cidx, 1, "Consumer Index");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
- CTLFLAG_RD,
- &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
+ CTLFLAG_RD,
+ &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
- CTLFLAG_RD,
- &txq->ift_in_use, 1, "descriptors in use");
+ CTLFLAG_RD,
+ &txq->ift_in_use, 1, "descriptors in use");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
- CTLFLAG_RD,
- &txq->ift_processed, "descriptors procesed for clean");
+ CTLFLAG_RD,
+ &txq->ift_processed, "descriptors processed for clean");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
- CTLFLAG_RD,
- &txq->ift_cleaned, "total cleaned");
+ CTLFLAG_RD,
+ &txq->ift_cleaned, "total cleaned");
SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
- CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
- 0, mp_ring_state_handler, "A", "soft ring state");
+ CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
+ 0, mp_ring_state_handler, "A", "soft ring state");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
- CTLFLAG_RD, &txq->ift_br->enqueues,
- "# of enqueues to the mp_ring for this queue");
+ CTLFLAG_RD, &txq->ift_br->enqueues,
+ "# of enqueues to the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
- CTLFLAG_RD, &txq->ift_br->drops,
- "# of drops in the mp_ring for this queue");
+ CTLFLAG_RD, &txq->ift_br->drops,
+ "# of drops in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
- CTLFLAG_RD, &txq->ift_br->starts,
- "# of normal consumer starts in the mp_ring for this queue");
+ CTLFLAG_RD, &txq->ift_br->starts,
+ "# of normal consumer starts in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
- CTLFLAG_RD, &txq->ift_br->stalls,
- "# of consumer stalls in the mp_ring for this queue");
+ CTLFLAG_RD, &txq->ift_br->stalls,
+ "# of consumer stalls in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
- CTLFLAG_RD, &txq->ift_br->restarts,
- "# of consumer restarts in the mp_ring for this queue");
+ CTLFLAG_RD, &txq->ift_br->restarts,
+ "# of consumer restarts in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
- CTLFLAG_RD, &txq->ift_br->abdications,
- "# of consumer abdications in the mp_ring for this queue");
+ CTLFLAG_RD, &txq->ift_br->abdications,
+ "# of consumer abdications in the mp_ring for this queue");
}
if (scctx->isc_nrxqsets > 100)
@@ -6266,44 +6297,43 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
snprintf(namebuf, NAME_BUFLEN, qfmt, i);
queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
- CTLFLAG_RD, NULL, "Queue Name");
+ CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
- CTLFLAG_RD,
- &rxq->ifr_cq_pidx, 1, "Producer Index");
+ CTLFLAG_RD,
+ &rxq->ifr_cq_pidx, 1, "Producer Index");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
- CTLFLAG_RD,
- &rxq->ifr_cq_cidx, 1, "Consumer Index");
+ CTLFLAG_RD,
+ &rxq->ifr_cq_cidx, 1, "Consumer Index");
}
-
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
- CTLFLAG_RD, NULL, "freelist Name");
+ CTLFLAG_RD, NULL, "freelist Name");
fl_list = SYSCTL_CHILDREN(fl_node);
SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
- CTLFLAG_RD,
- &fl->ifl_pidx, 1, "Producer Index");
+ CTLFLAG_RD,
+ &fl->ifl_pidx, 1, "Producer Index");
SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
- CTLFLAG_RD,
- &fl->ifl_cidx, 1, "Consumer Index");
+ CTLFLAG_RD,
+ &fl->ifl_cidx, 1, "Consumer Index");
SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
- CTLFLAG_RD,
- &fl->ifl_credits, 1, "credits available");
+ CTLFLAG_RD,
+ &fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
- CTLFLAG_RD,
- &fl->ifl_m_enqueued, "mbufs allocated");
+ CTLFLAG_RD,
+ &fl->ifl_m_enqueued, "mbufs allocated");
SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
- CTLFLAG_RD,
- &fl->ifl_m_dequeued, "mbufs freed");
+ CTLFLAG_RD,
+ &fl->ifl_m_dequeued, "mbufs freed");
SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
- CTLFLAG_RD,
- &fl->ifl_cl_enqueued, "clusters allocated");
+ CTLFLAG_RD,
+ &fl->ifl_cl_enqueued, "clusters allocated");
SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
- CTLFLAG_RD,
- &fl->ifl_cl_dequeued, "clusters freed");
+ CTLFLAG_RD,
+ &fl->ifl_cl_dequeued, "clusters freed");
#endif
}
@@ -6418,7 +6448,7 @@ iflib_netdump_poll(struct ifnet *ifp, int count)
(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
for (i = 0; i < scctx->isc_nrxqsets; i++)
- (void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
+ (void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */ );
return (0);
}
-#endif /* NETDUMP */
+#endif /* NETDUMP */