@@ -2,6 +2,7 @@
* Copyright(c) 2018 Cavium, Inc
*/
#include <getopt.h>
+#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
@@ -13,6 +14,9 @@
#define CMD_LINE_OPT_TRANSFER_MODE "transfer-mode"
+static volatile bool rx_core_running;
+static volatile bool eventmode_worker_stop;
+
static const char short_options[] =
""
;
@@ -110,6 +114,16 @@ internal_get_next_active_core(struct eventmode_conf *em_conf,
return next_core;
}
+static inline bool
+internal_dev_has_burst_mode(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
+ true : false;
+}
+
/* Global functions */
void
@@ -992,3 +1006,288 @@ rte_eventmode_helper_get_tx_queue(struct rte_eventmode_helper_conf *mode_conf,
return eventdev_config->nb_eventqueue;
}
+/* Helper functions for launching workers */
+
+static int32_t
+rte_eventmode_helper_start_worker_rx_core(struct eventmode_conf *em_conf,
+ uint32_t lcore_id)
+{
+ uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
+ struct rx_adapter_conf *adapter;
+ int service_count = 0;
+ int adapter_id;
+ int32_t ret;
+ int i;
+
+ RTE_EM_HLPR_LOG_INFO(
+ "Entering rx_core processing on lcore %u", lcore_id);
+
+ /*
+ * Parse the adapter conf to see which adapters need to be handled by
+ * this core.
+ */
+ for (i = 0; i < em_conf->nb_rx_adapter; i++) {
+ /* Check if we have reached the max allowed */
+ if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
+ RTE_EM_HLPR_LOG_ERR(
+ "Exceeded the max allowed adapters per rx core");
+ break;
+ }
+
+ adapter = &(em_conf->adapter[i]);
+ if (adapter->rx_core_id != lcore_id)
+ continue;
+
+ /* Adapter needs to be handled by this core */
+ adapter_id = adapter->adapter_id;
+
+ /* Get the service ID used by the adapter */
+ ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
+ &(service_id[service_count]));
+
+ if (ret != -ESRCH && ret != 0) {
+ RTE_EM_HLPR_LOG_ERR(
+ "Error getting service ID used by adapter");
+ return ret;
+ }
+
+ /* Update service count */
+ service_count++;
+ }
+
+ rx_core_running = true;
+
+ while (rx_core_running) {
+ for (i = 0; i < service_count; i++) {
+ /* Initiate rx_adapter service */
+ rte_service_run_iter_on_app_lcore(service_id[i], 0);
+ }
+ }
+
+ return 0;
+}
+
+static int32_t
+rte_eventmode_helper_stop_worker_rx_core(void)
+{
+ if (rx_core_running) {
+ RTE_EM_HLPR_LOG_INFO("Stopping rx cores");
+ rx_core_running = false;
+ }
+ return 0;
+}
+
+static struct rte_eventmode_helper_app_worker_params *
+rte_eventmode_helper_find_worker(uint32_t lcore_id,
+ struct eventmode_conf *em_conf,
+ struct rte_eventmode_helper_app_worker_params *app_wrkrs,
+ uint8_t nb_wrkr_param)
+{
+ struct rte_eventmode_helper_event_link_info *link = NULL;
+ uint8_t eventdev_id;
+ struct eventdev_params *eventdev_config;
+ int i;
+ struct rte_eventmode_helper_app_worker_params curr_conf = {0};
+ struct rte_eventmode_helper_app_worker_params *tmp_wrkr;
+
+ /*
+ * The event device to be used is derived from the first lcore-event
+ * link.
+ *
+ * Assumption: all lcore-event links tied to a core use the same event
+ * device. In other words, one core would be polling on queues of a
+ * single event device only.
+ */
+
+ /* Get a link for this lcore */
+ for (i = 0; i < em_conf->nb_link; i++) {
+ if (em_conf->link[i].lcore_id == lcore_id) {
+ link = &(em_conf->link[i]);
+ break;
+ }
+ }
+
+ if (link == NULL) {
+ RTE_EM_HLPR_LOG_ERR(
+ "No valid link found for lcore(%d)", lcore_id);
+ return NULL;
+ }
+
+ /* Get event dev ID */
+ eventdev_id = link->eventdev_id;
+
+ /* Get the corresponding eventdev config */
+ eventdev_config = internal_get_eventdev_params(em_conf, eventdev_id);
+
+ /* Populate the curr_conf with the capabilities */
+
+ /* Check for burst mode */
+ if (internal_dev_has_burst_mode(eventdev_id))
+ curr_conf.cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_BURST;
+ else
+ curr_conf.cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_NON_BURST;
+
+ curr_conf.cap.s1_sched_type =
+ rte_eventmode_helper_get_s1_sched_type(em_conf);
+
+ curr_conf.cap.s2_sched_type =
+ rte_eventmode_helper_get_s2_sched_type(em_conf);
+
+ /* TODO make this part of em_conf */
+ curr_conf.nb_stage = 1;
+
+ /* Now parse the passed list and see if we have matching capabilities */
+
+ /* Initialize the pointer used to traverse the list */
+ tmp_wrkr = app_wrkrs;
+
+ for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {
+
+ /* Skip this if capabilities are not matching */
+ if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
+ continue;
+
+ /* Skip if the number of stages is not matching */
+ if (tmp_wrkr->nb_stage != curr_conf.nb_stage)
+ continue;
+
+ /* If the checks pass, we have a match */
+ return tmp_wrkr;
+ }
+
+ /* TODO required for ATQ */
+ RTE_SET_USED(eventdev_config);
+
+ return NULL;
+}
+
+static int
+rte_eventmode_helper_verify_match_worker(
+ struct rte_eventmode_helper_app_worker_params *match_wrkr)
+{
+ if (match_wrkr->nb_stage == 0) {
+ RTE_EM_HLPR_LOG_ERR("App stages cannot be 0");
+ return 0;
+ }
+
+ /* Verify the stages registered */
+ switch (match_wrkr->nb_stage) {
+ case 2:
+ if (match_wrkr->s2_worker_thread == NULL) {
+ RTE_EM_HLPR_LOG_ERR(
+ "No worker registered for second stage");
+ return 0;
+ }
+ /* Fall through */
+ case 1:
+ if (match_wrkr->s1_worker_thread == NULL) {
+ RTE_EM_HLPR_LOG_ERR(
+ "No worker registered for first stage");
+ return 0;
+ }
+ break;
+ default:
+ RTE_EM_HLPR_LOG_ERR("Only two stages are supported now");
+ return 0;
+ }
+
+ /* Success */
+ return 1;
+}
+
+void
+rte_eventmode_helper_launch_worker(struct rte_eventmode_helper_conf *mode_conf,
+ struct rte_eventmode_helper_app_worker_params *app_wrkr,
+ uint8_t nb_wrkr_param)
+{
+ struct rte_eventmode_helper_app_worker_params *match_wrkr;
+ uint32_t lcore_id;
+ int i;
+ struct eventmode_conf *em_conf;
+
+ if (mode_conf == NULL) {
+ RTE_EM_HLPR_LOG_ERR("Invalid conf");
+ return;
+ }
+
+ if (mode_conf->mode_params == NULL) {
+ RTE_EM_HLPR_LOG_ERR("Invalid mode params");
+ return;
+ }
+
+ /* Get eventmode conf */
+ em_conf = (struct eventmode_conf *)(mode_conf->mode_params);
+
+ /* Get core ID */
+ lcore_id = rte_lcore_id();
+
+ /* TODO check capability for rx core */
+
+ /* Check if this is rx core */
+ if (em_conf->rx_core_mask & (1 << lcore_id)) {
+ rte_eventmode_helper_start_worker_rx_core(em_conf, lcore_id);
+ return;
+ }
+
+ if (app_wrkr == NULL || nb_wrkr_param == 0) {
+ RTE_EM_HLPR_LOG_ERR("Invalid args");
+ return;
+ }
+
+ /*
+ * This is a regular worker thread. The application may register
+ * multiple workers with various capabilities. The worker to be run
+ * is selected based on the capabilities of the configured event
+ * device.
+ */
+
+ /* Get the first matching worker for the event device */
+ match_wrkr = rte_eventmode_helper_find_worker(lcore_id,
+ em_conf,
+ app_wrkr,
+ nb_wrkr_param);
+
+ if (match_wrkr == NULL) {
+ RTE_EM_HLPR_LOG_ERR(
+ "No matching worker registered for lcore %d", lcore_id);
+ goto clean_and_exit;
+ }
+
+ /* Verify sanity of the matched worker */
+ if (rte_eventmode_helper_verify_match_worker(match_wrkr) != 1) {
+ RTE_EM_HLPR_LOG_ERR("Error in validating the matched worker");
+ goto clean_and_exit;
+ }
+
+ /*
+ * If single stage, the worker thread is expected to contain its own
+ * loop, thereby avoiding repeated dereferencing of the function
+ * pointer.
+ */
+ if (match_wrkr->nb_stage == 1) {
+ match_wrkr->s1_worker_thread((void *)mode_conf);
+ goto clean_and_exit;
+ }
+
+ /* TODO write worker stop API */
+ eventmode_worker_stop = false;
+
+ /* Run the worker threads */
+ while (!eventmode_worker_stop) {
+
+ /* Launch the stages registered */
+ for (i = 1; i <= match_wrkr->nb_stage; i++) {
+ switch (i) {
+ case 1:
+ match_wrkr->s1_worker_thread((void *)mode_conf);
+ break;
+ case 2:
+ match_wrkr->s2_worker_thread((void *)mode_conf);
+ break;
+ }
+ }
+ }
+
+clean_and_exit:
+
+ /* Flag rx_cores to stop, if started */
+ rte_eventmode_helper_stop_worker_rx_core();
+}
@@ -12,6 +12,22 @@ enum rte_eventmode_helper_pkt_transfer_mode {
RTE_EVENTMODE_HELPER_PKT_TRANSFER_MODE_EVENT,
};
+/* Event mode packet rx types */
+enum rte_eventmode_helper_rx_types {
+ RTE_EVENTMODE_HELPER_RX_TYPE_INVALID = 0,
+ RTE_EVENTMODE_HELPER_RX_TYPE_NON_BURST,
+ RTE_EVENTMODE_HELPER_RX_TYPE_BURST,
+ RTE_EVENTMODE_HELPER_RX_TYPE_MAX = 16
+};
+
+/* Event mode packet tx types */
+enum rte_eventmode_helper_tx_types {
+ RTE_EVENTMODE_HELPER_TX_TYPE_INVALID = 0,
+ RTE_EVENTMODE_HELPER_TX_TYPE_LOCKED,
+ RTE_EVENTMODE_HELPER_TX_TYPE_LOCKLESS,
+ RTE_EVENTMODE_HELPER_TX_TYPE_MAX = 16
+};
+
struct rte_eventmode_helper_conf {
enum rte_eventmode_helper_pkt_transfer_mode mode;
/**< Packet transfer mode of the application */
@@ -36,6 +52,27 @@ struct rte_eventmode_helper_event_link_info {
/**< Lcore to be polling on this port */
};
+/* Workers registered by the application */
+struct rte_eventmode_helper_app_worker_params {
+ union {
+ struct {
+ uint64_t burst : 4;
+ /**< Rx type of the worker (burst or non-burst) */
+ uint64_t s1_sched_type : 2;
+ /**< Stage 1 scheduling type for the thread */
+ uint64_t s2_sched_type : 2;
+ /**< Stage 2 scheduling type for the thread */
+ };
+ uint64_t u64;
+ } cap;
+ /**< Capabilities of this worker */
+ uint8_t nb_stage;
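+ /**< Number of stages registered (1 or 2) */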
+ void (*s1_worker_thread)(void *);
+ /**< Stage 1 worker thread */
+ void (*s2_worker_thread)(void *);
+ /**< Stage 2 worker thread */
+};
+
/* Common helper functions for command line parsing */
/**
@@ -152,5 +189,25 @@ uint8_t
rte_eventmode_helper_get_tx_queue(struct rte_eventmode_helper_conf *mode_conf,
uint8_t eventdev_id);
+/**
+ * Launch eventmode worker
+ *
+ * The application can request the eventmode helper subsystem to launch the
+ * worker based on the capabilities of the event device and the options
+ * selected while initializing the eventmode.
+ *
+ * @param mode_conf
+ * Configuration of the mode in which the application is doing packet handling
+ * @param app_wrkr
+ * List of all the workers registered by the application, along with their
+ * capabilities
+ * @param nb_wrkr_param
+ * Number of workers passed by the application
+ */
+void
+rte_eventmode_helper_launch_worker(struct rte_eventmode_helper_conf *mode_conf,
+ struct rte_eventmode_helper_app_worker_params *app_wrkr,
+ uint8_t nb_wrkr_param);
#endif /* _RTE_EVENTMODE_HELPER_H_ */
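
For reference, a minimal usage sketch (not part of this patch) showing how an
application could register a single-stage burst worker and launch it on all
lcores. The names app_worker_burst() and app_launch_one_lcore(), and the use
of RTE_SCHED_TYPE_ATOMIC, are hypothetical; the launch path assumes the
standard rte_eal_mp_remote_launch() mechanism and that mode_conf was obtained
from the eventmode helper initialization:

	#include <string.h>
	#include <rte_common.h>
	#include <rte_launch.h>
	#include <rte_eventdev.h>
	#include <rte_eventmode_helper.h>

	/* Hypothetical single-stage worker; holds the application's event loop */
	static void
	app_worker_burst(void *conf)
	{
		struct rte_eventmode_helper_conf *mode_conf = conf;

		RTE_SET_USED(mode_conf);
		/* Dequeue events in bursts, process packets, enqueue to next queue */
	}

	static int
	app_launch_one_lcore(void *conf)
	{
		struct rte_eventmode_helper_app_worker_params wrkr;

		memset(&wrkr, 0, sizeof(wrkr));

		/* Capabilities must match what eventmode was configured with */
		wrkr.cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_BURST;
		wrkr.cap.s1_sched_type = RTE_SCHED_TYPE_ATOMIC;
		wrkr.nb_stage = 1;
		wrkr.s1_worker_thread = app_worker_burst;

		rte_eventmode_helper_launch_worker(conf, &wrkr, 1);
		return 0;
	}

	/* From main(), after parsing args and setting up eventmode: */
	rte_eal_mp_remote_launch(app_launch_one_lcore, mode_conf, CALL_MASTER);
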
@@ -43,6 +43,9 @@
#define EVENT_MODE_MAX_LCORE_LINKS \
(EVENT_MODE_MAX_EVENT_DEVS * EVENT_MODE_MAX_EVENT_QUEUES_PER_DEV)
+/* Max adapters that one rx core can handle */
+#define EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE EVENT_MODE_MAX_RX_ADAPTERS
+
/* Event dev params */
struct eventdev_params {
uint8_t eventdev_id;
@@ -104,4 +107,24 @@ struct eventmode_conf {
/**< 64 bit field to specify extended params */
};
+/*
+ * Get the scheduling type of the first stage of the application
+ */
+static inline uint8_t
+rte_eventmode_helper_get_s1_sched_type(struct eventmode_conf *em_conf)
+{
+ return em_conf->ext_params.s1_sched_type;
+}
+
+/*
+ * Get the scheduling type of the second stage of the application
+ */
+static inline uint8_t
+rte_eventmode_helper_get_s2_sched_type(struct eventmode_conf *em_conf)
+{
+ return em_conf->ext_params.s2_sched_type;
+}
+
#endif /* _RTE_EVENTMODE_HELPER_INTERNAL_H_ */