[dpdk-dev] [PATCH v2 3/8] event/opdl: add the opdl pmd main body and xstats helper function

Liang Ma liang.j.ma at intel.com
Fri Dec 15 12:26:24 CET 2017


This commit adds an OPDL implementation of the eventdev API. The
implementation here is intended to enable the community to use
the OPDL infrastructure under the eventdev API.

The implementation consists of two main files:
  - opdl_evdev.c              Device creation, configuration, etc.
  - opdl_evdev_xstats.c       Helper functions to support xstats collection

This commit only adds the implementation, no existing DPDK files
are modified.
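
For reference, the PMD registers under the vdev name "event_opdl" and
accepts the numa_node, do_validation and self_test parameters handled
in opdl_probe() below. A minimal sketch of creating an instance from
application code (the "event_opdl0" device name and the parameter
values are illustrative only):

    #include <rte_bus_vdev.h>
    #include <rte_eventdev.h>

    /* Create an OPDL eventdev; the argument string mirrors the
     * kvargs parsed in opdl_probe().
     */
    int ret = rte_vdev_init("event_opdl0",
            "numa_node=0,do_validation=1,self_test=0");

    /* On success the device is reachable via the standard API. */
    int dev_id = rte_event_dev_get_dev_id("event_opdl0");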

Signed-off-by: Liang Ma <liang.j.ma at intel.com>
Signed-off-by: Peter Mccarthy <peter.mccarthy at intel.com>
---
 drivers/event/opdl/opdl_evdev.c        | 744 +++++++++++++++++++++++++++++++++
 drivers/event/opdl/opdl_evdev_xstats.c | 205 +++++++++
 2 files changed, 949 insertions(+)
 create mode 100644 drivers/event/opdl/opdl_evdev.c
 create mode 100644 drivers/event/opdl/opdl_evdev_xstats.c

diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
new file mode 100644
index 0000000..9184ba6
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -0,0 +1,744 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_memzone.h>
+#include <rte_kvargs.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_event_ring.h>
+#include <rte_service_component.h>
+#include <rte_cycles.h>
+
+#include "opdl_evdev.h"
+#include "opdl_ring.h"
+
+#define EVENTDEV_NAME_OPDL_PMD event_opdl
+#define NUMA_NODE_ARG "numa_node"
+#define DO_VALIDATION_ARG "do_validation"
+#define DO_TEST_ARG "self_test"
+
+
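+/*
+ * Fast-path entry points. Each opdl_port carries enq/deq function
+ * pointers bound while the ports are initialised at dev_start, so the
+ * hot path below is a single indirect call with no run-time
+ * scheduling decision.
+ */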
+uint16_t
+opdl_event_enqueue_burst(void *port,
+			 const struct rte_event ev[],
+			 uint16_t num)
+{
+	struct opdl_port *p = port;
+
+	if (unlikely(!p->opdl->data->dev_started))
+		return 0;
+
+	/* either rx_enqueue or disclaim */
+	return p->enq(p, ev, num);
+}
+
+uint16_t
+opdl_event_enqueue(void *port, const struct rte_event *ev)
+{
+	struct opdl_port *p = port;
+
+	if (unlikely(!p->opdl->data->dev_started))
+		return 0;
+
+	return p->enq(p, ev, 1);
+}
+
+uint16_t
+opdl_event_dequeue_burst(void *port,
+			 struct rte_event *ev,
+			 uint16_t num,
+			 uint64_t wait)
+{
+	struct opdl_port *p = (void *)port;
+
+	RTE_SET_USED(wait);
+
+	if (unlikely(!p->opdl->data->dev_started))
+		return 0;
+
+	/* This function pointer can point to tx_dequeue or claim */
+	return p->deq(p, ev, num);
+}
+
+uint16_t
+opdl_event_dequeue(void *port,
+		   struct rte_event *ev,
+		   uint64_t wait)
+{
+	struct opdl_port *p = (void *)port;
+
+	if (unlikely(!p->opdl->data->dev_started))
+		return 0;
+
+	RTE_SET_USED(wait);
+
+	return p->deq(p, ev, 1);
+}
+
+static void
+opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
+
+
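+/*
+ * Static linking model: a port may be linked to at most one queue and
+ * links may only change while the device is stopped, which lets
+ * dev_start build the stage dependencies once up front.
+ */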
+static int
+opdl_port_link(struct rte_eventdev *dev,
+	       void *port,
+	       const uint8_t queues[],
+	       const uint8_t priorities[],
+	       uint16_t num)
+{
+	struct opdl_port *p = port;
+
+	RTE_SET_USED(priorities);
+
+	if (unlikely(dev->data->dev_started)) {
+		OPDL_LOG_ERR("Attempt to link queue (%u) to port %d while device started\n",
+				queues[0],
+				p->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	/* Max of 1 queue per port */
+	if (num > 1) {
+		OPDL_LOG_ERR("Attempt to link more than one queue (%u) to port %d requested\n",
+				num,
+				p->id);
+		rte_errno = -EDQUOT;
+		return 0;
+	}
+
+	if (!p->configured) {
+		OPDL_LOG_ERR("port %d not configured, cannot link to %u\n",
+				p->id,
+				queues[0]);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	if (p->external_qid != OPDL_INVALID_QID) {
+		OPDL_LOG_ERR("port %d already linked to queue %u, cannot link to %u\n",
+				p->id,
+				p->external_qid,
+				queues[0]);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	p->external_qid = queues[0];
+
+	return 1;
+}
+
+static int
+opdl_port_unlink(struct rte_eventdev *dev,
+		 void *port,
+		 uint8_t queues[],
+		 uint16_t nb_unlinks)
+{
+	struct opdl_port *p = port;
+
+	RTE_SET_USED(nb_unlinks);
+
+	if (unlikely(dev->data->dev_started)) {
+		OPDL_LOG_ERR("Attempt to unlink queue (%u) to port %d while device started\n",
+				queues[0],
+				p->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	/* Reset the port's queue bindings */
+	p->queue_id = OPDL_INVALID_QID;
+	p->p_type = OPDL_INVALID_PORT;
+	p->external_qid = OPDL_INVALID_QID;
+
+	return 1;
+}
+
+static int
+opdl_port_setup(struct rte_eventdev *dev,
+		uint8_t port_id,
+		const struct rte_event_port_conf *conf)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+	struct opdl_port *p = &device->ports[port_id];
+
+	RTE_SET_USED(conf);
+
+	/* Check if port already configured */
+	if (p->configured) {
+		OPDL_LOG_ERR("Attempt to setup port %d which is already setup\n",
+				p->id);
+		return -EDQUOT;
+	}
+
+	*p = (struct opdl_port){0}; /* zero entire structure */
+	p->id = port_id;
+	p->opdl = device;
+	p->queue_id = OPDL_INVALID_QID;
+	p->external_qid = OPDL_INVALID_QID;
+	dev->data->ports[port_id] = p;
+	rte_smp_wmb();
+	p->configured = 1;
+	device->nb_ports++;
+	return 0;
+}
+
+static void
+opdl_port_release(void *port)
+{
+	struct opdl_port *p = (void *)port;
+
+	if (p == NULL ||
+	    p->opdl->data->dev_started) {
+		return;
+	}
+
+	p->configured = 0;
+	p->initialized = 0;
+}
+
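+/*
+ * Queue setup only records metadata (external id and queue type) in
+ * q_md[]; the backing opdl rings are created later, in opdl_start(),
+ * once the full queue/port topology is known.
+ */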
+static int
+opdl_queue_setup(struct rte_eventdev *dev,
+		 uint8_t queue_id,
+		 const struct rte_event_queue_conf *conf)
+{
+	enum queue_type type;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	/* Extra sanity check, probably not needed */
+	if (queue_id == OPDL_INVALID_QID) {
+		OPDL_LOG_ERR("Invalid queue id %u requested\n",
+				queue_id);
+		return -EINVAL;
+	}
+
+	if (device->nb_q_md >= device->max_queue_nb) {
+		OPDL_LOG_ERR("Max number of queues %u exceeded by request %u\n",
+			     device->max_queue_nb,
+			     device->nb_q_md);
+		return -EINVAL;
+	}
+
+	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
+	    & conf->event_queue_cfg) {
+		OPDL_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
+		return -ENOTSUP;
+	} else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK
+		   & conf->event_queue_cfg) {
+		type = OPDL_Q_TYPE_SINGLE_LINK;
+	} else {
+		switch (conf->schedule_type) {
+		case RTE_SCHED_TYPE_ORDERED:
+			type = OPDL_Q_TYPE_ORDERED;
+			break;
+		case RTE_SCHED_TYPE_ATOMIC:
+			type = OPDL_Q_TYPE_ATOMIC;
+			break;
+		case RTE_SCHED_TYPE_PARALLEL:
+			type = OPDL_Q_TYPE_ORDERED;
+			break;
+		default:
+			OPDL_LOG_ERR("Unknown queue type %d requested\n",
+				     conf->event_queue_cfg);
+			return -EINVAL;
+		}
+	}
+	/* Check if queue id has been setup already */
+	for (uint32_t i = 0; i < device->nb_q_md; i++) {
+		if (device->q_md[i].ext_id == queue_id) {
+			OPDL_LOG_ERR("queue id %u already setup\n",
+					queue_id);
+			return -EINVAL;
+		}
+	}
+
+	device->q_md[device->nb_q_md].ext_id = queue_id;
+	device->q_md[device->nb_q_md].type = type;
+	device->q_md[device->nb_q_md].setup = 1;
+	device->nb_q_md++;
+
+	return 1;
+}
+
+static void
+opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	RTE_SET_USED(queue_id);
+
+	if (device->data->dev_started)
+		return;
+}
+
+static void
+opdl_queue_def_conf(struct rte_eventdev *dev,
+		    uint8_t queue_id,
+		    struct rte_event_queue_conf *conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queue_id);
+
+	static const struct rte_event_queue_conf default_conf = {
+		.nb_atomic_flows = 1024,
+		.nb_atomic_order_sequences = 1,
+		.event_queue_cfg = 0,
+		.schedule_type = RTE_SCHED_TYPE_ORDERED,
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	};
+
+	*conf = default_conf;
+}
+
+static void
+opdl_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+		struct rte_event_port_conf *port_conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(port_id);
+
+	port_conf->new_event_threshold = MAX_OPDL_CONS_Q_DEPTH;
+	port_conf->dequeue_depth = MAX_OPDL_CONS_Q_DEPTH;
+	port_conf->enqueue_depth = MAX_OPDL_CONS_Q_DEPTH;
+}
+
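+/*
+ * Capture the application's queue/port/event limits. Per-dequeue
+ * timeouts are rejected up front; the dequeue path above ignores the
+ * wait parameter entirely.
+ */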
+static int
+opdl_dev_configure(const struct rte_eventdev *dev)
+{
+	struct opdl_evdev *opdl = opdl_pmd_priv(dev);
+	const struct rte_eventdev_data *data = dev->data;
+	const struct rte_event_dev_config *conf = &data->dev_conf;
+
+	opdl->max_queue_nb = conf->nb_event_queues;
+	opdl->max_port_nb = conf->nb_event_ports;
+	opdl->nb_events_limit = conf->nb_events_limit;
+
+	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		OPDL_LOG_ERR("DEQUEUE_TIMEOUT not supported\n");
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+static void
+opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
+{
+	RTE_SET_USED(dev);
+
+	static const struct rte_event_dev_info evdev_opdl_info = {
+		.driver_name = OPDL_PMD_NAME,
+		.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
+		.max_event_queue_flows = OPDL_QID_NUM_FIDS,
+		.max_event_queue_priority_levels = OPDL_Q_PRIORITY_MAX,
+		.max_event_priority_levels = OPDL_IQS_MAX,
+		.max_event_ports = OPDL_PORTS_MAX,
+		.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
+		.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
+		.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
+		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
+	};
+
+	*info = evdev_opdl_info;
+}
+
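+/*
+ * Human-readable dump of ring and per-port statistics. Only available
+ * when the do_validation vdev parameter enabled stats collection.
+ */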
+static void
+opdl_dump(struct rte_eventdev *dev, FILE *f)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return;
+
+	fprintf(f,
+		"\n\n -- RING STATISTICS --\n");
+
+	for (uint32_t i = 0; i < device->nb_opdls; i++)
+		opdl_ring_dump(device->opdl[i], f);
+
+	fprintf(f,
+		"\n\n -- PORT STATISTICS --\n"
+		"Type Port Index  Port Id  Queue Id     Av. Req Size  "
+		"Av. Grant Size     Av. Cycles PP"
+		"      Empty DEQs   Non Empty DEQs   Pkts Processed\n");
+
+	for (uint32_t i = 0; i < device->max_port_nb; i++) {
+		char queue_id[64];
+		char total_cyc[64];
+		const char *p_type;
+
+		uint64_t cne, cpg;
+		struct opdl_port *port = &device->ports[i];
+
+		if (port->initialized) {
+			cne = port->port_stat[claim_non_empty];
+			cpg = port->port_stat[claim_pkts_granted];
+			if (port->p_type == OPDL_REGULAR_PORT)
+				p_type = "REG";
+			else if (port->p_type == OPDL_PURE_RX_PORT)
+				p_type = "  RX";
+			else if (port->p_type == OPDL_PURE_TX_PORT)
+				p_type = "  TX";
+			else if (port->p_type == OPDL_ASYNC_PORT)
+				p_type = "SYNC";
+			else
+				p_type = "????";
+
+			sprintf(queue_id, "%02u", port->external_qid);
+			if (port->p_type == OPDL_REGULAR_PORT ||
+					port->p_type == OPDL_ASYNC_PORT)
+				sprintf(total_cyc,
+					" %'16"PRIu64"",
+					(cpg != 0 ?
+					 port->port_stat[total_cycles] / cpg
+					 : 0));
+			else
+				sprintf(total_cyc,
+					"             ----");
+			fprintf(f,
+				"%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
+				"%'16"PRIu64" %'16"PRIu64" %'16"PRIu64"\n",
+				p_type,
+				i,
+				port->id,
+				(port->external_qid == OPDL_INVALID_QID ? "---"
+				 : queue_id),
+				(cne != 0 ?
+				 port->port_stat[claim_pkts_requested] / cne
+				 : 0),
+				(cne != 0 ?
+				 port->port_stat[claim_pkts_granted] / cne
+				 : 0),
+				total_cyc,
+				port->port_stat[claim_empty],
+				port->port_stat[claim_non_empty],
+				port->port_stat[claim_pkts_granted]);
+		}
+	}
+	fprintf(f, "\n");
+}
+
+
+static void
+opdl_stop(struct rte_eventdev *dev)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	opdl_xstats_uninit(dev);
+
+	destroy_queues_and_rings(dev);
+
+	device->started = 0;
+
+	rte_smp_wmb();
+}
+
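+/*
+ * dev_start performs the deferred construction: create the opdl
+ * rings, map external to internal queue ids, initialise the ports,
+ * verify every queue is linked, register the event handlers and
+ * build the stage dependencies. Any failure unwinds via opdl_stop().
+ */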
+static int
+opdl_start(struct rte_eventdev *dev)
+{
+	int err = 0;
+
+	if (!err)
+		err = create_queues_and_rings(dev);
+
+	if (!err)
+		err = assign_internal_queue_ids(dev);
+
+	if (!err)
+		err = initialise_queue_zero_ports(dev);
+
+	if (!err)
+		err = initialise_all_other_ports(dev);
+
+	if (!err)
+		err = check_queues_linked(dev);
+
+	if (!err)
+		err = opdl_add_event_handlers(dev);
+
+	if (!err)
+		err = build_all_dependencies(dev);
+
+	if (!err)
+		opdl_xstats_init(dev);
+	else
+		opdl_stop(dev);
+
+	return err;
+}
+
+static int
+opdl_close(struct rte_eventdev *dev)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+	uint32_t i;
+
+	for (i = 0; i < device->max_port_nb; i++) {
+		memset(&device->ports[i],
+		       0,
+		       sizeof(struct opdl_port));
+	}
+
+	memset(&device->s_md,
+			0x0,
+			sizeof(struct opdl_stage_meta_data)*OPDL_PORTS_MAX);
+
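+	/*
+	 * Fill with 0xFF so any stale queue id reads back as invalid
+	 * (assumption: OPDL_INVALID_QID is the all-ones 8-bit value).
+	 */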
+	memset(&device->q_md,
+			0xFF,
+			sizeof(struct opdl_queue_meta_data)*OPDL_MAX_QUEUES);
+
+	memset(device->q_map_ex_to_in,
+			0,
+			sizeof(uint8_t)*OPDL_INVALID_QID);
+
+	opdl_xstats_uninit(dev);
+
+	device->max_port_nb = 0;
+	device->max_queue_nb = 0;
+	device->nb_opdls = 0;
+	device->nb_queues = 0;
+	device->nb_ports = 0;
+	device->nb_q_md = 0;
+
+	dev->data->nb_queues = 0;
+	dev->data->nb_ports = 0;
+
+	return 0;
+}
+
+static int
+assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *socket_id = opaque;
+	*socket_id = atoi(value);
+	if (*socket_id >= RTE_MAX_NUMA_NODES)
+		return -1;
+	return 0;
+}
+
+static int
+set_do_validation(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *do_val = opaque;
+	*do_val = atoi(value);
+	if (*do_val != 0)
+		*do_val = 1;
+
+	return 0;
+}
+
+static int
+set_do_test(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *do_test = opaque;
+
+	*do_test = atoi(value);
+
+	if (*do_test != 0)
+		*do_test = 1;
+	return 0;
+}
+
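+/*
+ * vdev probe: parse the numa_node/do_validation/self_test kvargs,
+ * allocate the eventdev, wire up the ops table and fast-path
+ * functions, then (in the primary process only) initialise the
+ * private data and optionally run the self test.
+ */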
+static int
+opdl_probe(struct rte_vdev_device *vdev)
+{
+	static const struct rte_eventdev_ops evdev_opdl_ops = {
+		.dev_configure = opdl_dev_configure,
+		.dev_infos_get = opdl_info_get,
+		.dev_close = opdl_close,
+		.dev_start = opdl_start,
+		.dev_stop = opdl_stop,
+		.dump = opdl_dump,
+
+		.queue_def_conf = opdl_queue_def_conf,
+		.queue_setup = opdl_queue_setup,
+		.queue_release = opdl_queue_release,
+		.port_def_conf = opdl_port_def_conf,
+		.port_setup = opdl_port_setup,
+		.port_release = opdl_port_release,
+		.port_link = opdl_port_link,
+		.port_unlink = opdl_port_unlink,
+
+		.xstats_get = opdl_xstats_get,
+		.xstats_get_names = opdl_xstats_get_names,
+		.xstats_get_by_name = opdl_xstats_get_by_name,
+		.xstats_reset = opdl_xstats_reset,
+	};
+
+	static const char *const args[] = {
+		NUMA_NODE_ARG,
+		DO_VALIDATION_ARG,
+		DO_TEST_ARG,
+		NULL
+	};
+	const char *name;
+	const char *params;
+	struct rte_eventdev *dev;
+	struct opdl_evdev *opdl;
+	int socket_id = rte_socket_id();
+	int do_validation = 0;
+	int do_test = 0;
+	int str_len;
+	int test_result = 0;
+
+	name = rte_vdev_device_name(vdev);
+	params = rte_vdev_device_args(vdev);
+	if (params != NULL && params[0] != '\0') {
+		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+		if (!kvlist) {
+			OPDL_LOG_INFO(
+					"Ignoring unsupported parameters when creating device '%s'\n",
+					name);
+		} else {
+			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+					assign_numa_node, &socket_id);
+			if (ret != 0) {
+				OPDL_LOG_ERR(
+						"%s: Error parsing numa node parameter",
+						name);
+
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DO_VALIDATION_ARG,
+					set_do_validation, &do_validation);
+			if (ret != 0) {
+				OPDL_LOG_ERR(
+					"%s: Error parsing do validation parameter",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DO_TEST_ARG,
+					set_do_test, &do_test);
+			if (ret != 0) {
+				OPDL_LOG_ERR(
+					"%s: Error parsing do test parameter",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			rte_kvargs_free(kvlist);
+		}
+	}
+
+	OPDL_LOG_INFO("\tSuccess - creating eventdev device %s, numa_node:[%d], do_validation:[%s],"
+			" self_test:[%s]\n",
+			name,
+			socket_id,
+			(do_validation ? "true" : "false"),
+			(do_test ? "true" : "false"));
+
+	dev = rte_event_pmd_vdev_init(name,
+			sizeof(struct opdl_evdev), socket_id);
+
+	if (dev == NULL) {
+		OPDL_LOG_ERR("eventdev vdev init() failed");
+		return -EFAULT;
+	}
+	dev->dev_ops = &evdev_opdl_ops;
+	dev->enqueue = opdl_event_enqueue;
+	dev->enqueue_burst = opdl_event_enqueue_burst;
+	dev->enqueue_new_burst = opdl_event_enqueue_burst;
+	dev->enqueue_forward_burst = opdl_event_enqueue_burst;
+	dev->dequeue = opdl_event_dequeue;
+	dev->dequeue_burst = opdl_event_dequeue_burst;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	opdl = dev->data->dev_private;
+	opdl->data = dev->data;
+	opdl->socket = socket_id;
+	opdl->do_validation = do_validation;
+	opdl->do_test = do_test;
+	str_len = strlen(name);
+	/* copy the terminating NUL too, assuming service_name is sized
+	 * for the full vdev name */
+	memcpy(opdl->service_name, name, str_len + 1);
+
+	if (do_test == 1)
+		test_result = opdl_selftest();
+
+	return test_result;
+}
+
+static int
+opdl_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	OPDL_LOG_INFO("Closing eventdev opdl device %s\n", name);
+
+	return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver evdev_opdl_pmd_drv = {
+	.probe = opdl_probe,
+	.remove = opdl_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OPDL_PMD, evdev_opdl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(event_opdl, NUMA_NODE_ARG "=<int>"
+			      DO_VALIDATION_ARG "=<int>" DO_TEST_ARG "=<int>");
diff --git a/drivers/event/opdl/opdl_evdev_xstats.c b/drivers/event/opdl/opdl_evdev_xstats.c
new file mode 100644
index 0000000..a2abc76
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev_xstats.c
@@ -0,0 +1,205 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_event_ring.h>
+#include "opdl_evdev.h"
+
+static const char * const port_xstat_str[] = {
+	"claim_pkts_requested",
+	"claim_pkts_granted",
+	"claim_non_empty",
+	"claim_empty",
+	"total_cycles",
+};
+
+
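+/*
+ * Per-port xstats live in a flat array indexed as
+ *   id = port_id * max_num_port_xstat + stat
+ * so, assuming max_num_port_xstat matches the five names above,
+ * "port_02_claim_empty" has id 2 * 5 + 3 = 13. Each entry points at
+ * the live counter in struct opdl_port, so reads below simply
+ * dereference it.
+ */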
+void
+opdl_xstats_init(struct rte_eventdev *dev)
+{
+	uint32_t i, j;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return;
+
+	for (i = 0; i < device->max_port_nb; i++) {
+		struct opdl_port *port = &device->ports[i];
+
+		for (j = 0; j < max_num_port_xstat; j++) {
+			uint32_t index = (i * max_num_port_xstat) + j;
+
+			/* Name */
+			sprintf(device->port_xstat[index].stat.name,
+			       "port_%02u_%s",
+			       i,
+			       port_xstat_str[j]);
+
+			/* ID */
+			device->port_xstat[index].id = index;
+
+			/* Stats ptr */
+			device->port_xstat[index].value = &port->port_stat[j];
+		}
+	}
+}
+
+int
+opdl_xstats_uninit(struct rte_eventdev *dev)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return 0;
+
+	memset(device->port_xstat,
+	       0,
+	       sizeof(device->port_xstat));
+
+	return 0;
+}
+
+int
+opdl_xstats_get_names(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id,
+		struct rte_event_dev_xstats_name *xstats_names,
+		unsigned int *ids, unsigned int size)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return -ENOTSUP;
+
+	if (mode == RTE_EVENT_DEV_XSTATS_DEVICE ||
+			mode == RTE_EVENT_DEV_XSTATS_QUEUE)
+		return -EINVAL;
+
+	if (queue_port_id >= device->max_port_nb)
+		return -EINVAL;
+
+	if (size < max_num_port_xstat)
+		return max_num_port_xstat;
+
+	uint32_t port_idx = queue_port_id * max_num_port_xstat;
+
+	for (uint32_t j = 0; j < max_num_port_xstat; j++) {
+		strcpy(xstats_names[j].name,
+				device->port_xstat[j + port_idx].stat.name);
+		ids[j] = device->port_xstat[j + port_idx].id;
+	}
+
+	return max_num_port_xstat;
+}
+
+int
+opdl_xstats_get(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id,
+		const unsigned int ids[],
+		uint64_t values[], unsigned int n)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return -ENOTSUP;
+
+	if (mode == RTE_EVENT_DEV_XSTATS_DEVICE ||
+			mode == RTE_EVENT_DEV_XSTATS_QUEUE)
+		return -EINVAL;
+
+	if (queue_port_id >= device->max_port_nb)
+		return -EINVAL;
+
+	if (n > max_num_port_xstat)
+		return -EINVAL;
+
+	uint32_t p_start = queue_port_id * max_num_port_xstat;
+	uint32_t p_finish = p_start + max_num_port_xstat;
+
+	for (uint32_t i = 0; i < n; i++) {
+		if (ids[i] < p_start || ids[i] >= p_finish)
+			return -EINVAL;
+
+		values[i] = *(device->port_xstat[ids[i]].value);
+	}
+
+	return n;
+}
+
+uint64_t
+opdl_xstats_get_by_name(const struct rte_eventdev *dev,
+		const char *name, unsigned int *id)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return -ENOTSUP;
+
+	uint32_t max_index = device->max_port_nb * max_num_port_xstat;
+
+	for (uint32_t i = 0; i < max_index; i++) {
+		if (strncmp(name,
+			   device->port_xstat[i].stat.name,
+			   RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
+			if (id != NULL)
+				*id = i;
+			if (device->port_xstat[i].value)
+				return *(device->port_xstat[i].value);
+			break;
+		}
+	}
+	return -EINVAL;
+}
+
+int
+opdl_xstats_reset(struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode,
+		int16_t queue_port_id, const uint32_t ids[],
+		uint32_t nb_ids)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return -ENOTSUP;
+
+	RTE_SET_USED(mode);
+	RTE_SET_USED(queue_port_id);
+	RTE_SET_USED(ids);
+	RTE_SET_USED(nb_ids);
+
+	return -ENOTSUP;
+}
-- 
2.7.5
