@@ -104,6 +104,16 @@ struct evt_options {
true : false;
}
+static inline bool
+evt_has_flow_id(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
+ true : false;
+}
+
static inline int
evt_service_setup(uint32_t service_id)
{
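The helper mirrors evt_has_burst_mode() above: the capability is queried once per launch, never per event. A minimal usage sketch (assuming an initialized eventdev; the reporting function is illustrative, not part of the patch):

```c
#include <stdio.h>
#include <rte_eventdev.h>

/* Illustrative only: query RTE_EVENT_DEV_CAP_CARRY_FLOW_ID once at
 * startup and report whether workers can trust ev.flow_id. */
static void
report_flow_id_cap(uint8_t dev_id)
{
	struct rte_event_dev_info info;

	rte_event_dev_info_get(dev_id, &info);
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID)
		printf("eventdev %u carries flow IDs end to end\n", dev_id);
	else
		printf("eventdev %u may not preserve ev.flow_id; workers "
		       "must restore it from a side channel\n", dev_id);
}
```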
@@ -169,6 +179,7 @@ struct evt_options {
.dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
+ .nb_single_link_event_port_queues = 0,
.nb_events_limit = info.max_num_events,
.nb_event_queue_flows = opt->nb_flows,
.nb_event_port_dequeue_depth =
@@ -19,7 +19,7 @@
}
static int
-order_atq_worker(void *arg)
+order_atq_worker(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev;
@@ -34,6 +34,9 @@
continue;
}
+ if (!flow_id_cap)
+ ev.flow_id = ev.mbuf->udata64;
+
if (ev.sub_event_type == 0) { /* stage 0 from producer */
order_atq_process_stage_0(&ev);
while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@
}
static int
-order_atq_worker_burst(void *arg)
+order_atq_worker_burst(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,9 @@
}
for (i = 0; i < nb_rx; i++) {
+ if (!flow_id_cap)
+ ev[i].flow_id = ev[i].mbuf->udata64;
+
if (ev[i].sub_event_type == 0) { /*stage 0 */
order_atq_process_stage_0(&ev[i]);
} else if (ev[i].sub_event_type == 1) { /* stage 1 */
@@ -95,11 +101,19 @@
{
struct worker_data *w = arg;
const bool burst = evt_has_burst_mode(w->dev_id);
-
- if (burst)
- return order_atq_worker_burst(arg);
- else
- return order_atq_worker(arg);
+ const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+ if (burst) {
+ if (flow_id_cap)
+ return order_atq_worker_burst(arg, true);
+ else
+ return order_atq_worker_burst(arg, false);
+ } else {
+ if (flow_id_cap)
+ return order_atq_worker(arg, true);
+ else
+ return order_atq_worker(arg, false);
+ }
}
static int
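worker_wrapper() resolves both capabilities once and then selects a specialization; because the flag is passed as a literal constant, the compiler can typically fold the per-event `if (!flow_id_cap)` test out of each copy. A distilled sketch of the idiom, with hypothetical names:

```c
#include <stdbool.h>
#include <rte_common.h>

/* Hypothetical sketch of the specialization idiom: the loop body is
 * force-inlined, so each wrapper below becomes its own copy with the
 * flag folded to a compile-time constant. */
static __rte_always_inline int
loop_body(void *arg, const bool flag)
{
	RTE_SET_USED(arg);
	/* per-event work; "if (!flag)" vanishes in each specialization */
	return flag ? 0 : 1;
}

static int loop_flag_on(void *arg)  { return loop_body(arg, true); }
static int loop_flag_off(void *arg) { return loop_body(arg, false); }
```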
@@ -49,6 +49,7 @@
const uint32_t flow = (uintptr_t)m % nb_flows;
/* Maintain seq number per flow */
m->seqn = producer_flow_seq[flow]++;
+ m->udata64 = flow;
ev.flow_id = flow;
ev.mbuf = m;
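Storing the flow in m->udata64 gives workers a device-independent copy: on eventdevs without RTE_EVENT_DEV_CAP_CARRY_FLOW_ID the dequeued ev.flow_id may be garbage, but the mbuf travels intact. A condensed sketch of both ends of the round trip (simplified from the test; assumes a valid mbuf and port):

```c
#include <stdbool.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>

/* Producer side: stash the flow ID where the device cannot lose it. */
static void
produce_one(uint8_t dev_id, uint8_t port, struct rte_mbuf *m, uint32_t flow)
{
	struct rte_event ev = {
		.op = RTE_EVENT_OP_NEW,
		.flow_id = flow,
		.mbuf = m,
	};

	m->udata64 = flow;
	while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1)
		rte_pause();
}

/* Worker side: restore ev.flow_id when the device does not carry it. */
static void
restore_flow_id(struct rte_event *ev, const bool flow_id_cap)
{
	if (!flow_id_cap)
		ev->flow_id = ev->mbuf->udata64;
}
```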
@@ -318,7 +319,7 @@
opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
/* port configuration */
- const struct rte_event_port_conf p_conf = {
+ struct rte_event_port_conf p_conf = {
.dequeue_depth = opt->wkr_deq_dep,
.enqueue_depth = dev_info.max_event_port_dequeue_depth,
.new_event_threshold = dev_info.max_num_events,
@@ -351,6 +352,8 @@
p->queue_id = 0;
p->t = t;
+ p_conf.new_event_threshold /= 2;
+
ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
if (ret) {
evt_err("failed to setup producer port %d", port);
@@ -19,7 +19,7 @@
}
static int
-order_queue_worker(void *arg)
+order_queue_worker(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev;
@@ -34,6 +34,9 @@
continue;
}
+ if (!flow_id_cap)
+ ev.flow_id = ev.mbuf->udata64;
+
if (ev.queue_id == 0) { /* from ordered queue */
order_queue_process_stage_0(&ev);
while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@
}
static int
-order_queue_worker_burst(void *arg)
+order_queue_worker_burst(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,9 @@
}
for (i = 0; i < nb_rx; i++) {
+ if (!flow_id_cap)
+ ev[i].flow_id = ev[i].mbuf->udata64;
+
if (ev[i].queue_id == 0) { /* from ordered queue */
order_queue_process_stage_0(&ev[i]);
} else if (ev[i].queue_id == 1) {/* from atomic queue */
@@ -95,11 +102,19 @@
{
struct worker_data *w = arg;
const bool burst = evt_has_burst_mode(w->dev_id);
-
- if (burst)
- return order_queue_worker_burst(arg);
- else
- return order_queue_worker(arg);
+ const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+ if (burst) {
+ if (flow_id_cap)
+ return order_queue_worker_burst(arg, true);
+ else
+ return order_queue_worker_burst(arg, false);
+ } else {
+ if (flow_id_cap)
+ return order_queue_worker(arg, true);
+ else
+ return order_queue_worker(arg, false);
+ }
}
static int
@@ -559,10 +559,10 @@
if (!(info.event_dev_cap &
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
pconf.enqueue_depth = info.max_event_port_enqueue_depth;
- pconf.disable_implicit_release = 1;
+ pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
- pconf.disable_implicit_release = 0;
+ pconf.event_port_cfg = 0;
}
ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
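The test pins down the contract the library enforces below: RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL is only legal on devices advertising RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE. A hedged sketch of compliant application-side usage:

```c
#include <rte_eventdev.h>

/* Sketch (assumes the device is already configured): request explicit
 * event release on port 0 only when the device supports opting out of
 * implicit release. */
static int
setup_port_explicit_release(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_port_conf conf;
	int ret;

	rte_event_dev_info_get(dev_id, &info);
	ret = rte_event_port_default_conf_get(dev_id, 0, &conf);
	if (ret < 0)
		return ret;

	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	return rte_event_port_setup(dev_id, 0, &conf);
}
```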
@@ -357,7 +357,8 @@ static void drain_4_bytes(int fd, fd_set *fdset)
RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
- RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
static int
@@ -405,7 +405,8 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
RTE_EVENT_DEV_CAP_BURST_MODE|
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
- RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
@@ -535,7 +536,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
port_conf->enqueue_depth =
DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
- port_conf->disable_implicit_release = 0;
+ port_conf->event_port_cfg = 0;
}
static int
@@ -224,7 +224,8 @@
.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
RTE_EVENT_DEV_CAP_NONSEQ_MODE|
- RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
};
}
@@ -152,7 +152,8 @@ struct ssovf_mbox_convert_ns_getworks_iter {
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES|
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
- RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
@@ -218,7 +219,7 @@ struct ssovf_mbox_convert_ns_getworks_iter {
port_conf->new_event_threshold = edev->max_num_events;
port_conf->dequeue_depth = 1;
port_conf->enqueue_depth = 1;
- port_conf->disable_implicit_release = 0;
+ port_conf->event_port_cfg = 0;
}
static void
@@ -501,7 +501,8 @@
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
- RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
static void
@@ -374,7 +374,8 @@
.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
- .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
+ .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,
};
*info = evdev_opdl_info;
@@ -101,7 +101,8 @@
dev_info->max_num_events = (1ULL << 20);
dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
RTE_EVENT_DEV_CAP_BURST_MODE |
- RTE_EVENT_DEV_CAP_EVENT_QOS;
+ RTE_EVENT_DEV_CAP_EVENT_QOS |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
static int
@@ -209,7 +210,7 @@
port_conf->new_event_threshold = 32 * 1024;
port_conf->dequeue_depth = 16;
port_conf->enqueue_depth = 16;
- port_conf->disable_implicit_release = 0;
+ port_conf->event_port_cfg = 0;
}
static void
@@ -175,7 +175,8 @@
}
p->inflight_max = conf->new_event_threshold;
- p->implicit_release = !conf->disable_implicit_release;
+ p->implicit_release = !(conf->event_port_cfg &
+ RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
/* check if ring exists, same as rx_worker above */
snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -508,7 +509,7 @@
port_conf->new_event_threshold = 1024;
port_conf->dequeue_depth = 16;
port_conf->enqueue_depth = 16;
- port_conf->disable_implicit_release = 0;
+ port_conf->event_port_cfg = 0;
}
static int
@@ -615,7 +616,8 @@
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
- RTE_EVENT_DEV_CAP_NONSEQ_MODE),
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
};
*info = evdev_sw_info;
@@ -172,7 +172,6 @@ struct test {
.new_event_threshold = 1024,
.dequeue_depth = 32,
.enqueue_depth = 64,
- .disable_implicit_release = 0,
};
if (num_ports > MAX_PORTS)
return -1;
@@ -1227,7 +1226,6 @@ struct test_event_dev_stats {
.new_event_threshold = 128,
.dequeue_depth = 32,
.enqueue_depth = 64,
- .disable_implicit_release = 0,
};
if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
printf("%d Error setting up port\n", __LINE__);
@@ -1317,7 +1315,6 @@ struct test_event_dev_stats {
.new_event_threshold = 128,
.dequeue_depth = 32,
.enqueue_depth = 64,
- .disable_implicit_release = 0,
};
if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
printf("%d Error setting up port\n", __LINE__);
@@ -3079,7 +3076,8 @@ struct test_event_dev_stats {
* only be initialized once - and this needs to be set for multiple runs
*/
conf.new_event_threshold = 512;
- conf.disable_implicit_release = disable_implicit_release;
+ conf.event_port_cfg = disable_implicit_release ?
+ RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
if (rte_event_port_setup(evdev, 0, &conf) < 0) {
printf("Error setting up RX port\n");
@@ -129,6 +129,7 @@
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
+ .nb_single_link_event_port_queues = 1,
.nb_events_limit = 4096,
.nb_event_queue_flows = 1024,
.nb_event_port_dequeue_depth = 128,
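Here one port/queue pair is reserved for the pipeline's dedicated TX stage, matching the RTE_EVENT_PORT_CFG_SINGLE_LINK flag introduced later in this series. A hedged sketch of setting up such a pair (IDs and the default-conf source are illustrative):

```c
#include <errno.h>
#include <rte_eventdev.h>

/* Sketch: mark the TX port single-link and link it to exactly one queue.
 * Assumes rte_event_dev_configure() counted this pair in
 * nb_single_link_event_port_queues. */
static int
setup_tx_single_link(uint8_t dev_id, uint8_t tx_port, uint8_t tx_queue)
{
	struct rte_event_port_conf conf;
	int ret;

	ret = rte_event_port_default_conf_get(dev_id, tx_port, &conf);
	if (ret < 0)
		return ret;

	conf.event_port_cfg |= RTE_EVENT_PORT_CFG_SINGLE_LINK;
	ret = rte_event_port_setup(dev_id, tx_port, &conf);
	if (ret < 0)
		return ret;

	/* A single-link port links to exactly one queue. */
	ret = rte_event_port_link(dev_id, tx_port, &tx_queue, NULL, 1);
	return ret == 1 ? 0 : -EINVAL;
}
```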
@@ -143,7 +144,7 @@
.schedule_type = cdata.queue_type,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
+ .nb_atomic_order_sequences = 1024,
};
struct rte_event_queue_conf tx_q_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
@@ -167,7 +168,8 @@
disable_implicit_release = (dev_info.event_dev_cap &
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
- wkr_p_conf.disable_implicit_release = disable_implicit_release;
+ wkr_p_conf.event_port_cfg = disable_implicit_release ?
+ RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
if (dev_info.max_num_events < config.nb_events_limit)
config.nb_events_limit = dev_info.max_num_events;
@@ -436,6 +436,7 @@
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
+ .nb_single_link_event_port_queues = 0,
.nb_events_limit = 4096,
.nb_event_queue_flows = 1024,
.nb_event_port_dequeue_depth = 128,
@@ -126,8 +126,9 @@
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
- event_p_conf.disable_implicit_release =
- evt_rsrc->disable_implicit_release;
+ event_p_conf.event_port_cfg = 0;
+ if (evt_rsrc->disable_implicit_release)
+ event_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
@@ -123,8 +123,9 @@
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
- event_p_conf.disable_implicit_release =
- evt_rsrc->disable_implicit_release;
+ event_p_conf.event_port_cfg = 0;
+ if (evt_rsrc->disable_implicit_release)
+ event_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
event_p_id++) {
@@ -115,8 +115,9 @@
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
- event_p_conf.disable_implicit_release =
- evt_rsrc->disable_implicit_release;
+ event_p_conf.event_port_cfg = 0;
+ if (evt_rsrc->disable_implicit_release)
+ event_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
@@ -113,8 +113,9 @@
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
- event_p_conf.disable_implicit_release =
- evt_rsrc->disable_implicit_release;
+ event_p_conf.event_port_cfg = 0;
+ if (evt_rsrc->disable_implicit_release)
+ event_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
event_p_id++) {
@@ -130,6 +130,7 @@ enum rte_cpu_flag_t {
RTE_CPUFLAG_CLDEMOTE, /**< Cache Line Demote */
RTE_CPUFLAG_MOVDIRI, /**< Direct Store Instructions */
RTE_CPUFLAG_MOVDIR64B, /**< Direct Store Instructions 64B */
+ RTE_CPUFLAG_UMWAIT, /**< UMONITOR/UMWAIT */
RTE_CPUFLAG_AVX512VP2INTERSECT, /**< AVX512 Two Register Intersection */
/* The last item */
@@ -137,6 +137,7 @@ struct feature_entry {
FEAT_DEF(CLDEMOTE, 0x00000007, 0, RTE_REG_ECX, 25)
FEAT_DEF(MOVDIRI, 0x00000007, 0, RTE_REG_ECX, 27)
FEAT_DEF(MOVDIR64B, 0x00000007, 0, RTE_REG_ECX, 28)
+ FEAT_DEF(UMWAIT, 0x00000007, 0, RTE_REG_ECX, 5)
FEAT_DEF(AVX512VP2INTERSECT, 0x00000007, 0, RTE_REG_EDX, 8)
};
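With the flag defined, runtime detection follows the standard cpuflags pattern; a minimal sketch:

```c
#include <rte_cpuflags.h>

/* Sketch: gate a UMONITOR/UMWAIT-based wait loop on runtime support,
 * falling back to plain polling when the CPU lacks the instructions. */
static int
umwait_supported(void)
{
	return rte_cpu_get_flag_enabled(RTE_CPUFLAG_UMWAIT) > 0;
}
```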
@@ -7,6 +7,7 @@ else
cflags += '-DBSD'
endif
+use_function_versioning = true
sources = files('rte_eventdev.c',
'rte_event_ring.c',
'eventdev_trace_points.c',
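Setting use_function_versioning enables the build flags required by rte_function_versioning.h. For reference, a generic sketch of the mechanism (the symbol name, suffixes, and version nodes here are illustrative, not the ones this series versions; effective only in shared-library builds with a matching version map):

```c
#include <rte_function_versioning.h>

/* Illustrative only: keep the old binary interface exported under the
 * 20.0 version node while new links bind to the v21 implementation. */
int some_api_v20(int x);
int some_api_v21(int x);

int some_api_v20(int x) { return x; }
VERSION_SYMBOL(some_api, _v20, 20.0);

int some_api_v21(int x) { return x + 1; }
BIND_DEFAULT_SYMBOL(some_api, _v21, 21);
```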
@@ -286,7 +286,7 @@ static int txa_service_queue_del(uint8_t id,
return ret;
}
- pc->disable_implicit_release = 0;
+ pc->event_port_cfg = 0;
ret = rte_event_port_setup(dev_id, port_id, pc);
if (ret) {
RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
@@ -32,6 +32,8 @@
#include <rte_ethdev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_compat.h>
+#include <rte_function_versioning.h>
#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
@@ -87,7 +89,8 @@
}
int
-rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
+rte_event_dev_info_get(uint8_t dev_id,
+ struct rte_event_dev_info *dev_info)
{
struct rte_eventdev *dev;
@@ -437,9 +440,29 @@
dev_id);
return -EINVAL;
}
- if (dev_conf->nb_event_queues > info.max_event_queues) {
- RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
- dev_id, dev_conf->nb_event_queues, info.max_event_queues);
+ if (dev_conf->nb_event_queues > info.max_event_queues +
+ info.max_single_link_event_port_queue_pairs) {
+ RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
+ dev_id, dev_conf->nb_event_queues,
+ info.max_event_queues,
+ info.max_single_link_event_port_queue_pairs);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_queues -
+ dev_conf->nb_single_link_event_port_queues >
+ info.max_event_queues) {
+ RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
+ dev_id, dev_conf->nb_event_queues,
+ dev_conf->nb_single_link_event_port_queues,
+ info.max_event_queues);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_single_link_event_port_queues >
+ dev_conf->nb_event_queues) {
+ RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
+ dev_id,
+ dev_conf->nb_single_link_event_port_queues,
+ dev_conf->nb_event_queues);
return -EINVAL;
}
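Concretely: a device reporting max_event_queues = 8 and max_single_link_event_port_queue_pairs = 2 accepts nb_event_queues = 10 when nb_single_link_event_port_queues = 2, but rejects the same total with only one single-link queue, since 9 general-purpose queues would exceed the limit. A compact restatement of the rule (sketch, not library code):

```c
#include <stdbool.h>
#include <rte_eventdev.h>

/* Restates the three queue-count checks above: single-link queues extend
 * the ceiling, the general-purpose remainder must still fit, and the
 * single-link count cannot exceed the total. */
static bool
queue_counts_valid(const struct rte_event_dev_info *info,
		   const struct rte_event_dev_config *cfg)
{
	unsigned int nb_sl = cfg->nb_single_link_event_port_queues;

	return cfg->nb_event_queues <= (unsigned int)info->max_event_queues +
			info->max_single_link_event_port_queue_pairs &&
		cfg->nb_event_queues - nb_sl <= info->max_event_queues &&
		nb_sl <= cfg->nb_event_queues;
}
```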
@@ -448,9 +471,31 @@
RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
return -EINVAL;
}
- if (dev_conf->nb_event_ports > info.max_event_ports) {
- RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
- dev_id, dev_conf->nb_event_ports, info.max_event_ports);
+ if (dev_conf->nb_event_ports > info.max_event_ports +
+ info.max_single_link_event_port_queue_pairs) {
+ RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
+ dev_id, dev_conf->nb_event_ports,
+ info.max_event_ports,
+ info.max_single_link_event_port_queue_pairs);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_ports -
+ dev_conf->nb_single_link_event_port_queues
+ > info.max_event_ports) {
+ RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
+ dev_id, dev_conf->nb_event_ports,
+ dev_conf->nb_single_link_event_port_queues,
+ info.max_event_ports);
+ return -EINVAL;
+ }
+
+ if (dev_conf->nb_single_link_event_port_queues >
+ dev_conf->nb_event_ports) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
+ dev_id,
+ dev_conf->nb_single_link_event_port_queues,
+ dev_conf->nb_event_ports);
return -EINVAL;
}
@@ -737,7 +782,8 @@
return -EINVAL;
}
- if (port_conf && port_conf->disable_implicit_release &&
+ if (port_conf &&
+ (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
!(dev->data->event_dev_cap &
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
RTE_EDEV_LOG_ERR(
@@ -809,6 +855,7 @@
uint32_t *attr_value)
{
struct rte_eventdev *dev;
+ uint32_t config;
if (!attr_value)
return -EINVAL;
@@ -830,6 +877,10 @@
case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
break;
+ case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
+ config = dev->data->ports_cfg[port_id].event_port_cfg;
+ *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
+ break;
default:
return -EINVAL;
};
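Reading the attribute back shows the mapping: the value is derived from the stored event_port_cfg bits rather than a dedicated field. A short sketch:

```c
#include <rte_eventdev.h>

/* Sketch: returns 1 if implicit release is disabled on the port, 0 if
 * enabled, or a negative errno from the attribute query. */
static int
port_implicit_release_disabled(uint8_t dev_id, uint8_t port_id)
{
	uint32_t val;
	int ret;

	ret = rte_event_port_attr_get(dev_id, port_id,
			RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE, &val);
	return ret < 0 ? ret : (int)val;
}
```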
@@ -291,6 +291,12 @@
* single queue to each port or map a single queue to many port.
*/
+#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
+/**< Event device preserves the flow ID from the enqueued
+ * event to the dequeued event if the flag is set. Otherwise,
+ * the content of this field is implementation dependent.
+ */
+
/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
/**< Highest priority expressed across eventdev subsystem
@@ -380,6 +386,10 @@ struct rte_event_dev_info {
* event port by this device.
* A device that does not support bulk enqueue will set this as 1.
*/
+ uint8_t max_event_port_links;
+ /**< Maximum number of queues that can be linked to a single event
+ * port by this device.
+ */
int32_t max_num_events;
/**< A *closed system* event dev has a limit on the number of events it
* can manage at a time. An *open system* event dev does not have a
@@ -387,6 +397,12 @@ struct rte_event_dev_info {
*/
uint32_t event_dev_cap;
/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+ uint8_t max_single_link_event_port_queue_pairs;
+ /**< Maximum number of event ports and queues that are optimized for
+ * (and only capable of) single-link configurations supported by this
+ * device. These ports and queues are not accounted for in
+ * max_event_ports or max_event_queues.
+ */
};
/**
@@ -494,6 +510,14 @@ struct rte_event_dev_config {
*/
uint32_t event_dev_cfg;
/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+ uint8_t nb_single_link_event_port_queues;
+ /**< Number of event ports and queues that will be singly-linked to
+ * each other. These are a subset of the overall event ports and
+ * queues; this value cannot exceed *nb_event_ports* or
+ * *nb_event_queues*. If the device has ports and queues that are
+ * optimized for single-link usage, this field is a hint for how many
+ * to allocate; otherwise, regular event ports and queues can be used.
+ */
};
/**
@@ -519,7 +543,6 @@ struct rte_event_dev_config {
rte_event_dev_configure(uint8_t dev_id,
const struct rte_event_dev_config *dev_conf);
-
/* Event queue specific APIs */
/* Event queue configuration bitmap flags */
@@ -671,6 +694,20 @@ struct rte_event_queue_conf {
/* Event port specific APIs */
+/* Event port configuration bitmap flags */
+#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
+/**< Configure the port not to release outstanding events in
+ * rte_event_dev_dequeue_burst(). If set, all events received through
+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
+ * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
+ */
+#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
+/**< This event port links only to a single event queue.
+ *
+ * @see rte_event_port_setup(), rte_event_port_link()
+ */
+
/** Event port configuration structure */
struct rte_event_port_conf {
int32_t new_event_threshold;
@@ -698,13 +735,7 @@ struct rte_event_port_conf {
* which previously supplied to rte_event_dev_configure().
* Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
*/
- uint8_t disable_implicit_release;
- /**< Configure the port not to release outstanding events in
- * rte_event_dev_dequeue_burst(). If true, all events received through
- * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
- * RTE_EVENT_OP_FORWARD. Must be false when the device is not
- * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
- */
+ uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
};
/**
@@ -769,6 +800,10 @@ struct rte_event_port_conf {
* The new event threshold of the port
*/
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
+/**
+ * The implicit release disable attribute of the port
+ */
+#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
/**
* Get an attribute from a port.
@@ -88,6 +88,60 @@
return -ENXIO;
}
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .probe function to attach to a event
+ * interface. Same as rte_event_pmd_pci_probe, except caller can specify
+ * the name.
+ */
+static inline int
+rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev,
+ size_t private_data_size,
+ eventdev_pmd_pci_callback_t devinit,
+ const char *name)
+{
+ struct rte_eventdev *eventdev;
+
+ int retval;
+
+ if (devinit == NULL)
+ return -EINVAL;
+
+ eventdev = rte_event_pmd_allocate(name,
+ pci_dev->device.numa_node);
+ if (eventdev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket(
+ "eventdev private structure",
+ private_data_size,
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ eventdev->dev = &pci_dev->device;
+
+ /* Invoke PMD device initialization function */
+ retval = devinit(eventdev);
+ if (retval == 0)
+ return 0;
+
+ RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
+ " failed", pci_drv->driver.name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ rte_event_pmd_release(eventdev);
+
+ return -ENXIO;
+}
/**
* @internal
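A hedged sketch of how a PMD's .probe callback might use the named variant (the driver name, private struct, and init callback are hypothetical):

```c
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eventdev_pmd_pci.h>

struct my_evdev_priv {
	int placeholder;	/* hypothetical per-device state */
};

/* Hypothetical eventdev_pmd_pci_callback_t implementation. */
static int
my_evdev_init(struct rte_eventdev *dev)
{
	RTE_SET_USED(dev);
	/* fill in dev->dev_ops, queue/port limits, etc. */
	return 0;
}

/* Probe under an explicit device name instead of one derived from the
 * PCI address. */
static int
my_evdev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
			sizeof(struct my_evdev_priv),
			my_evdev_init, "my_evdev");
}
```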
@@ -100,7 +100,6 @@ EXPERIMENTAL {
# added in 20.05
__rte_eventdev_trace_configure;
__rte_eventdev_trace_queue_setup;
- __rte_eventdev_trace_port_setup;
__rte_eventdev_trace_port_link;
__rte_eventdev_trace_port_unlink;
__rte_eventdev_trace_start;
@@ -134,4 +133,7 @@ EXPERIMENTAL {
__rte_eventdev_trace_crypto_adapter_queue_pair_del;
__rte_eventdev_trace_crypto_adapter_start;
__rte_eventdev_trace_crypto_adapter_stop;
+
+ # changed in 20.08
+ __rte_eventdev_trace_port_setup;
};