/*
* Copyright (c) 2015 Raghav Sethi, Princeton University. All rights reserved.
*/
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
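/*
 * DPDK-based UDP key/value responder, derived from the DPDK l2fwd sample
 * application. Each lcore polls its assigned RX queues, unpacks a protobuf-c
 * SystemMessage from the UDP payload, answers GET requests in place with a
 * fixed key and a pre-generated value, swaps the Ethernet source and
 * destination addresses, and transmits the packet back out the same port.
 */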
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <setjmp.h>
#include <stdarg.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include "message_formats.pb-c.h"
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
#define MAX_PAYLOAD_SIZE 1500
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF 8192
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/*
* Configurable number of RX/TX ring descriptors
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
static struct rte_eth_conf port_conf = {};
static struct ether_addr port_eth_addrs[RTE_MAX_ETHPORTS];
static uint32_t enabled_port_mask = 0;
static uint32_t enabled_ports[RTE_MAX_ETHPORTS];
static unsigned int n_enabled_ports;
static unsigned int n_rx_queues = 1;
static unsigned int n_lcores = 0;
struct mbuf_table {
unsigned len;
struct rte_mbuf *m_table[MAX_PKT_BURST];
};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_configuration {
uint8_t port; // Every lcore only talks to one port
unsigned n_rx_queue; // Every lcore can read from multiple rx queues
unsigned rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; // List of RX queues this lcore can read from
struct mbuf_table tx_mbufs; // mbufs to hold TX queue for this lcore
} __rte_cache_aligned;
struct lcore_configuration lcore_conf[RTE_MAX_LCORE];
struct rte_mempool *pktmbuf_pool = NULL;
/* Per-queue statistics struct */
struct queue_statistics_ {
uint64_t tx;
uint64_t rx;
uint64_t dropped;
} __rte_cache_aligned;
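/* Indexed by lcore_id: each lcore owns a single TX queue and its own counters. */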
struct queue_statistics_ queue_statistics[RTE_MAX_LCORE*MAX_RX_QUEUE_PER_LCORE];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
static int64_t timer_period = 1 * TIMER_MILLISECOND * 1000; /* default period is 1 second */
#define RESPONSE_SIZE 900
static char response_key[] = "TEST_RESPONSE_!@";
static char response_value[RESPONSE_SIZE];
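/* Every GET request is answered with this fixed key and a value buffer that
 * main() fills with random digits at startup. */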
/* Print per-queue and aggregate TX/RX/drop statistics */
static void
print_stats(void)
{
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
total_packets_dropped = 0;
total_packets_tx = 0;
total_packets_rx = 0;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
/* Clear screen and move to top left */
printf("%s%s", clr, topLeft);
printf("\nTX Queue statistics ====================================");
for (unsigned queue_id = 0; queue_id < n_lcores; queue_id++) {
printf("\nStatistics for queue %u ------------------------------"
"\nPackets sent: %24" PRIu64
"\nPackets received: %20" PRIu64
"\nPackets dropped: %21" PRIu64,
queue_id,
queue_statistics[queue_id].tx,
queue_statistics[queue_id].rx,
queue_statistics[queue_id].dropped);
total_packets_dropped += queue_statistics[queue_id].dropped;
total_packets_tx += queue_statistics[queue_id].tx;
total_packets_rx += queue_statistics[queue_id].rx;
}
printf("\nAggregate statistics ==============================="
"\nTotal packets sent: %18" PRIu64
"\nTotal packets received: %14" PRIu64
"\nTotal packets dropped: %15" PRIu64,
total_packets_tx,
total_packets_rx,
total_packets_dropped);
printf("\n====================================================\n");
}
/* Send the burst of packets on an output interface */
static int
l2fwd_send_burst(struct lcore_configuration *lconf, unsigned n, unsigned lcore_id)
{
struct rte_mbuf **m_table;
unsigned ret;
m_table = (struct rte_mbuf **)lconf->tx_mbufs.m_table;
// lcore_id is equal to the TX queue number
ret = rte_eth_tx_burst(lconf->port, (uint16_t) lcore_id, m_table, (uint16_t) n);
queue_statistics[lcore_id].tx += ret;
if (unlikely(ret < n)) {
queue_statistics[lcore_id].dropped += (n - ret);
do {
rte_pktmbuf_free(m_table[ret]);
} while (++ret < n);
}
return 0;
}
/* Enqueue packets for TX and prepare them to be sent */
static int
l2fwd_send_packet(struct rte_mbuf *m)
{
unsigned lcore_id, len;
struct lcore_configuration *lconf;
lcore_id = rte_lcore_id();
lconf = &lcore_conf[lcore_id];
len = lconf->tx_mbufs.len;
lconf->tx_mbufs.m_table[len] = m;
len++;
/* enough pkts to be sent */
if (unlikely(len == MAX_PKT_BURST)) {
l2fwd_send_burst(lconf, MAX_PKT_BURST, lcore_id);
//l2fwd_send_burst(lconf, MAX_PKT_BURST, 0);
len = 0;
}
lconf->tx_mbufs.len = len;
return 0;
}
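/* Unpack the protobuf payload, turn a GET request into a RES message carrying
 * the canned key/value pair, swap the Ethernet source and destination
 * addresses, and enqueue the packet for transmission back out the same port.
 * Packets that fail to unpack are still echoed back with swapped addresses. */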
static void
l2fwd_simple_forward(struct rte_mbuf *m)
{
struct ether_hdr* eth;
eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
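/* Note: local_buffer_resp is function-static and therefore shared by every
 * lcore executing this function, so concurrent GET processing can race on it. */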
static uint8_t local_buffer_resp[MAX_PAYLOAD_SIZE];
SystemMessage *incoming_message;
uint8_t* packet_payload = (uint8_t*)eth + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr);
uint16_t pkt_len = rte_pktmbuf_pkt_len(m);
uint16_t payload_len = pkt_len - (sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr));
/*
struct udp_hdr* udp = (uint8_t*)eth + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
RTE_LOG(INFO, L2FWD, "packet dport: %u\n", ntohs(udp->dst_port));
*/
incoming_message = system_message__unpack(NULL, payload_len, packet_payload);
if (incoming_message == NULL) {
RTE_LOG(INFO, L2FWD, "couldn't unpack packet\n");
} else {
if (incoming_message->type == SYSTEM_MESSAGE__TYPE__GET) {
incoming_message->type = SYSTEM_MESSAGE__TYPE__RES;
incoming_message->dst_mac.len = 6;
incoming_message->dst_mac.data = (uint8_t *)(&eth->d_addr);
ResponseMessage get_response = RESPONSE_MESSAGE__INIT;
get_response.type = RESPONSE_MESSAGE__TYPE__GET;
get_response.key = response_key;
get_response.value = response_value;
get_response.src_mac.len = 6;
get_response.src_mac.data = (uint8_t *)(&eth->d_addr);
response_message__pack(&get_response, local_buffer_resp);
incoming_message->message.len = response_message__get_packed_size(&get_response);
incoming_message->message.data = local_buffer_resp;
int extra_len = (int) system_message__get_packed_size(incoming_message) - (int) payload_len;
if (extra_len > 0 && rte_pktmbuf_append(m, (uint16_t) extra_len) == NULL) {
RTE_LOG(INFO, L2FWD, "Couldn't extend mbuf by %d bytes\n", extra_len);
rte_pktmbuf_free(m);
return;
}
system_message__pack(incoming_message, packet_payload);
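/* Note: the IPv4 and UDP length/checksum fields are not updated after the
 * payload grows, and incoming_message is never released with
 * system_message__free_unpacked(), so each unpacked request is leaked. */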
//RTE_LOG(INFO, L2FWD, "got GET request\n");
} else {
RTE_LOG(INFO, L2FWD, "got another kind of request\n");
}
}
// Flip src and dest addresses
struct ether_addr tmp_addr;
ether_addr_copy(&eth->d_addr, &tmp_addr);
ether_addr_copy(&eth->s_addr, &eth->d_addr);
ether_addr_copy(&tmp_addr, &eth->s_addr);
/*
char src_mac_str[19], dst_mac_str[19];
ether_format_addr(src_mac_str, 18, &eth->s_addr);
ether_format_addr(dst_mac_str, 18, &eth->d_addr);
src_mac_str[18] = '\0';
dst_mac_str[18] = '\0';
RTE_LOG(INFO, L2FWD, "packet now has src mac=%s and dst mac=%s\n", src_mac_str, dst_mac_str);
*/
l2fwd_send_packet(m);
}
/* main processing loop */
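/* Each lcore polls its assigned RX queues, hands every received packet to
 * l2fwd_simple_forward(), and periodically drains its TX buffer; the master
 * lcore additionally prints statistics every timer_period (when enabled). */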
static void
l2fwd_main_loop(void)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf *m;
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
unsigned nb_rx;
struct lcore_configuration *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
prev_tsc = 0;
timer_tsc = 0;
lcore_id = rte_lcore_id();
qconf = &lcore_conf[lcore_id];
if (qconf->n_rx_queue == 0) {
RTE_LOG(INFO, L2FWD, "lcore %u has no queues to read from\n", lcore_id);
return;
}
RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
while (1) {
cur_tsc = rte_rdtsc();
/*
* TX burst queue drain
*/
diff_tsc = cur_tsc - prev_tsc;
if (unlikely(diff_tsc > drain_tsc)) {
if (qconf->tx_mbufs.len > 0) {
l2fwd_send_burst(qconf, qconf->tx_mbufs.len, lcore_id);
qconf->tx_mbufs.len = 0;
}
/* if timer is enabled */
if (timer_period > 0) {
/* advance the timer */
timer_tsc += diff_tsc;
/* if timer has reached its timeout */
if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
/* do this only on master core */
if (lcore_id == rte_get_master_lcore()) {
print_stats();
/* reset the timer */
timer_tsc = 0;
}
}
}
prev_tsc = cur_tsc;
}
// Read packet from RX queues
for (uint16_t queue_id = 0; queue_id < qconf->n_rx_queue; queue_id++) {
nb_rx = rte_eth_rx_burst(qconf->port, qconf->rx_queue_list[queue_id], pkts_burst, MAX_PKT_BURST);
queue_statistics[lcore_id].rx += nb_rx;
for (uint16_t j = 0; j < nb_rx; j++) {
m = pkts_burst[j];
rte_prefetch0(rte_pktmbuf_mtod(m, void *));
l2fwd_simple_forward(m);
}
}
}
}
static int
l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
{
l2fwd_main_loop();
return 0;
}
/* display usage */
static void
l2fwd_usage(const char *prgname)
{
printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
" -q NQ: number of queue (=ports) per lcore (default is 1)\n"
" -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
prgname);
}
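/*
 * Example invocation (binary name and masks illustrative):
 *   ./kv-server -c 0x3 -n 4 -- -p 0x1 -q 2 -T 10
 */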
static int
l2fwd_parse_portmask(const char *portmask)
{
char *end = NULL;
unsigned long pm;
/* parse hexadecimal string */
pm = strtoul(portmask, &end, 16);
if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
return -1;
if (pm == 0)
return -1;
return pm;
}
static unsigned int
l2fwd_parse_nqueue(const char *q_arg)
{
char *end = NULL;
unsigned long n;
/* parse decimal string */
n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
return 0;
if (n == 0)
return 0;
if (n >= MAX_RX_QUEUE_PER_LCORE * RTE_MAX_LCORE)
return 0;
return n;
}
static int
l2fwd_parse_timer_period(const char *q_arg)
{
char *end = NULL;
int n;
/* parse number string */
n = strtol(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
return -1;
if (n >= MAX_TIMER_PERIOD)
return -1;
return n;
}
/* Parse the argument given in the command line of the application */
static int
l2fwd_parse_args(int argc, char **argv)
{
int opt, ret;
char **argvopt;
int option_index;
char *prgname = argv[0];
static struct option lgopts[] = {
{NULL, 0, 0, 0}
};
argvopt = argv;
while ((opt = getopt_long(argc, argvopt, "p:q:T:",
lgopts, &option_index)) != EOF) {
switch (opt) {
/* portmask */
case 'p':
enabled_port_mask = l2fwd_parse_portmask(optarg);
if (enabled_port_mask == 0) {
printf("invalid portmask\n");
l2fwd_usage(prgname);
return -1;
}
break;
/* nqueue */
case 'q':
n_rx_queues = l2fwd_parse_nqueue(optarg);
if (n_rx_queues == 0) {
printf("invalid queue number\n");
l2fwd_usage(prgname);
return -1;
}
break;
/* timer period */
case 'T':
timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
if (timer_period < 0) {
printf("invalid timer period\n");
l2fwd_usage(prgname);
return -1;
}
break;
/* long options */
case 0:
l2fwd_usage(prgname);
return -1;
default:
l2fwd_usage(prgname);
return -1;
}
}
if (optind >= 0)
argv[optind-1] = prgname;
ret = optind-1;
optind = 0; /* reset getopt lib */
return ret;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
uint8_t portid, count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
for (portid = 0; portid < port_num; portid++) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
rte_eth_link_get_nowait(portid, &link);
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
printf("Port %d Link Up - speed %u "
"Mbps - %s\n", (uint8_t)portid,
(unsigned)link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
printf("Port %d Link Down\n",
(uint8_t)portid);
continue;
}
/* clear all_ports_up flag if any link down */
if (link.link_status == 0) {
all_ports_up = 0;
break;
}
}
/* after finally printing all link status, get out */
if (print_flag == 1)
break;
if (all_ports_up == 0) {
printf(".");
fflush(stdout);
rte_delay_ms(CHECK_INTERVAL);
}
/* set the print_flag if all ports up or timeout */
if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
print_flag = 1;
printf("done\n");
}
}
}
int
main(int argc, char **argv)
{
int ret;
/* initialize fake response value */
int current_pos = 0;
while (current_pos + 12 < RESPONSE_SIZE) {
current_pos += snprintf(response_value + current_pos, RESPONSE_SIZE - current_pos, "%d", rand());
}
response_value[RESPONSE_SIZE-1] = '\0';
/* init EAL */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
argc -= ret;
argv += ret;
printf("RTE EAL initialized\n");
fflush(stdout);
/* parse application arguments (after the EAL ones) */
ret = l2fwd_parse_args(argc, argv);
if (ret < 0) {
rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
}
printf("Creating mbuf pool...\n");
fflush(stdout);
/* create the mbuf pool */
pktmbuf_pool =
rte_mempool_create("mbuf_pool", NB_MBUF,
MBUF_SIZE, 32,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
rte_socket_id(), 0);
if (pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
printf("Created mbuf pool successfully\n");
fflush(stdout);
unsigned n_total_ports = 0;
n_total_ports = rte_eth_dev_count();
if (n_total_ports == 0) {
rte_exit(EXIT_FAILURE, "Could not detect ethernet ports\n");
}
printf("Detected %d total ports\n", n_total_ports);
fflush(stdout);
if (n_total_ports > RTE_MAX_ETHPORTS) {
n_total_ports = RTE_MAX_ETHPORTS;
}
/* Get info for each enabled port */
struct rte_eth_dev_info dev_info;
n_enabled_ports = 0;
for (uint8_t portid = 0; portid < n_total_ports; portid++) {
// skip ports that are not enabled
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
enabled_ports[n_enabled_ports++] = portid;
rte_eth_dev_info_get(portid, &dev_info);
printf("Port %d is available for use\n", portid);
fflush(stdout);
}
printf("%d ports configured\n", n_enabled_ports);
fflush(stdout);
/* Find number of active lcores */
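/* Note: this assumes lcore IDs are contiguous and start at 0; the same
 * assumption is made wherever lcore_id is used as a TX queue or stats index. */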
n_lcores = 0;
while (rte_lcore_is_enabled(n_lcores)) {
n_lcores++;
}
if (n_rx_queues % n_lcores != 0) {
rte_exit(EXIT_FAILURE, "Number of queues (%u) must be a multiple of the number of lcores (%u)\n", n_rx_queues, n_lcores);
}
if (n_rx_queues % n_enabled_ports != 0) {
rte_exit(EXIT_FAILURE, "Number of queues (%u) must be a multiple of the number of ports (%u)\n", n_rx_queues, n_enabled_ports);
}
uint16_t rx_queues_per_lcore = n_rx_queues / n_lcores;
uint16_t rx_queues_per_port = n_rx_queues / n_enabled_ports;
uint16_t tx_queues_per_port = n_lcores;
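/* One TX queue per lcore, so each lcore can transmit without locking. */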
if (rx_queues_per_port < rx_queues_per_lcore) {
rte_exit(EXIT_FAILURE, "rx_queues_per_port (%u) must be >= rx_queues_per_lcore (%u)\n", rx_queues_per_port,
rx_queues_per_lcore);
}
unsigned int portid_offset = 0;
unsigned int queue_id = 0;
unsigned int lcore_id = 0;
/* Assign each lcore some RX queues and a port */
for (unsigned i = 0; i < n_lcores; i++) {
lcore_conf[i].n_rx_queue = 0;
lcore_conf[i].port = enabled_ports[portid_offset];
printf("lcore %u is assigned rx queues: ", i);
for (unsigned j = 0; j < rx_queues_per_lcore; j++) {
// Queue indices are relative to the port this lcore is attached to
unsigned port_queue_id = queue_id % rx_queues_per_port;
printf("%u ", port_queue_id);
lcore_conf[i].rx_queue_list[lcore_conf[i].n_rx_queue++] = port_queue_id;
queue_id++;
}
printf(" (linked to port %u)\n", enabled_ports[portid_offset]);
// Move to the next enabled port once all of its RX queues have been assigned
if (queue_id % rx_queues_per_port == 0) {
portid_offset++;
}
}
printf("Setting up %d RX queues per port and %d TX queues per port\n", rx_queues_per_port, tx_queues_per_port);
/* Initialise each port */
for (portid_offset = 0; portid_offset < n_enabled_ports; portid_offset++) {
uint8_t portid = enabled_ports[portid_offset];
/* Initialize port */
printf("Initializing port %u...\n", (unsigned) portid);
fflush(stdout);
port_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
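/* Note: port_conf.rxmode is left zeroed (ETH_MQ_RX_NONE), so no RSS is
 * configured and the NIC will typically deliver all traffic to RX queue 0
 * unless flows are steered to other queues by some other means. */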
ret = rte_eth_dev_configure(portid, rx_queues_per_port, tx_queues_per_port, &port_conf);
if (ret < 0) {
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%s, port=%u\n", rte_strerror(ret), (unsigned) portid);
}
rte_eth_macaddr_get(portid, &port_eth_addrs[portid]);
fflush(stdout);
/* Initialize RX queues */
for (uint16_t i = 0; i < rx_queues_per_port; i++) {
ret = rte_eth_rx_queue_setup(portid, i, nb_rxd,
rte_eth_dev_socket_id(portid),
NULL,
pktmbuf_pool); // Apparently, they all share this pool
if (ret < 0) {
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%s, code=%d, port=%u\n",
rte_strerror(ret), ret, (unsigned) portid);
}
}
fflush(stdout);
/* Initialize TX queues */
for (uint16_t i = 0; i < tx_queues_per_port; i++) {
ret = rte_eth_tx_queue_setup(portid, i, nb_txd,
rte_eth_dev_socket_id(portid),
NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, (unsigned) portid);
}
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0) {
rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n", ret, (unsigned) portid);
}
printf("Initialized queues and started device\n");
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
(unsigned) portid,
port_eth_addrs[portid].addr_bytes[0],
port_eth_addrs[portid].addr_bytes[1],
port_eth_addrs[portid].addr_bytes[2],
port_eth_addrs[portid].addr_bytes[3],
port_eth_addrs[portid].addr_bytes[4],
port_eth_addrs[portid].addr_bytes[5]);
/* initialize queue statistics */
memset(&queue_statistics, 0, sizeof(queue_statistics));
}
check_all_ports_link_status(n_total_ports, enabled_port_mask);
printf("Starting per-lcore processes...\n");
fflush(stdout);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
return 0;
}