[dpdk-dev] [PATCH v3 16/17] event/sw: add xstats support

Harry van Haaren harry.van.haaren at intel.com
Fri Feb 17 15:54:11 CET 2017


From: Bruce Richardson <bruce.richardson at intel.com>

Add support for xstats to report out on the state of the eventdev.
Useful for debugging and for unit tests, as well as observability
at runtime and performance tuning of apps to work well with the
scheduler.

Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
Signed-off-by: Harry van Haaren <harry.van.haaren at intel.com>
---
 drivers/event/sw/Makefile          |   1 +
 drivers/event/sw/sw_evdev.c        |   8 +
 drivers/event/sw/sw_evdev.h        |  21 +-
 drivers/event/sw/sw_evdev_xstats.c | 511 +++++++++++++++++++++++++++++++++++++
 4 files changed, 540 insertions(+), 1 deletion(-)
 create mode 100644 drivers/event/sw/sw_evdev_xstats.c

diff --git a/drivers/event/sw/Makefile b/drivers/event/sw/Makefile
index a7f5b3d..eb0dc4c 100644
--- a/drivers/event/sw/Makefile
+++ b/drivers/event/sw/Makefile
@@ -55,6 +55,7 @@ EXPORT_MAP := rte_pmd_evdev_sw_version.map
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_worker.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_xstats.c
 
 # export include files
 SYMLINK-y-include +=
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 8b7d8ed..c273399 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -598,6 +598,8 @@ sw_start(struct rte_eventdev *dev)
 			}
 		}
 	}
+	if (sw_xstats_init(sw) < 0)
+		return -1;
 	sw->started = 1;
 	return 0;
 }
@@ -606,6 +608,7 @@ static void
 sw_stop(struct rte_eventdev *dev)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
+	sw_xstats_uninit(sw);
 	sw->started = 0;
 }
 
@@ -681,6 +684,11 @@ sw_probe(const char *name, const char *params)
 			.port_release = sw_port_release,
 			.port_link = sw_port_link,
 			.port_unlink = sw_port_unlink,
+
+			.xstats_get = sw_xstats_get,
+			.xstats_get_names = sw_xstats_get_names,
+			.xstats_get_by_name = sw_xstats_get_by_name,
+			.xstats_reset = sw_xstats_reset,
 	};
 
 	static const char *const args[] = {
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 7c157c7..690bfa1 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -62,6 +62,8 @@
 
 #define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
 
+#define SW_NUM_POLL_BUCKETS (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT)
+
 enum {
 	QE_FLAG_VALID_SHIFT = 0,
 	QE_FLAG_COMPLETE_SHIFT,
@@ -203,7 +205,7 @@ struct sw_port {
 	uint64_t avg_pkt_ticks;      /* tracks average over NUM_SAMPLES burst */
 	uint64_t total_polls;        /* how many polls were counted in stats */
 	uint64_t zero_polls;         /* tracks polls returning nothing */
-	uint32_t poll_buckets[MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT];
+	uint32_t poll_buckets[SW_NUM_POLL_BUCKETS];
 		/* bucket values in 4s for shorter reporting */
 
 	/* History list structs, containing info on pkts egressed to worker */
@@ -230,6 +232,11 @@ struct sw_evdev {
 
 	uint32_t port_count;
 	uint32_t qid_count;
+	uint32_t xstats_count;
+	struct sw_xstats_entry *xstats;
+	uint32_t xstats_count_mode_dev;
+	uint32_t xstats_count_mode_port;
+	uint32_t xstats_count_mode_queue;
 
 	/* Contains all ports - load balanced and directed */
 	struct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;
@@ -283,5 +290,17 @@ uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
 uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
 			uint64_t wait);
 void sw_event_schedule(struct rte_eventdev *dev);
+int sw_xstats_init(struct sw_evdev *dev);
+int sw_xstats_uninit(struct sw_evdev *dev);
+int sw_xstats_get_names(const struct rte_eventdev *dev,
+	enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+	struct rte_event_dev_xstats_name *xstats_names, unsigned int size);
+int sw_xstats_get(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t sw_xstats_get_by_name(const struct rte_eventdev *dev,
+		const char *name, unsigned int *id);
+int sw_xstats_reset(struct rte_eventdev *dev);
+
 
 #endif /* _SW_EVDEV_H_ */
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
new file mode 100644
index 0000000..3354522
--- /dev/null
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -0,0 +1,511 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sw_evdev.h"
+#include "iq_ring.h"
+#include "event_ring.h"
+
+/* Selector for which statistic a getter callback should return. One
+ * shared enum covers all scopes (device/port/qid); each getter accepts
+ * only the subset valid for its scope and returns -1 for anything else.
+ */
+enum xstats_type {
+	/* common stats */
+	rx,
+	tx,
+	dropped,
+	inflight,
+	calls,
+	credits,
+	/* device instance specific */
+	no_iq_enq,
+	no_cq_enq,
+	/* port_specific */
+	rx_used,
+	rx_free,
+	tx_used,
+	tx_free,
+	pkt_cycles,
+	poll_return, /* for zero-count and used also for port bucket loop */
+	/* qid_specific */
+	iq_size,
+	iq_used,
+	/* qid port mapping specific */
+	pinned,
+};
+
+/* Stat getter callback: returns the value of one statistic for the
+ * object identified by obj_idx. extra_arg carries a per-entry parameter
+ * (e.g. histogram bucket index, iq index, or port id), unused otherwise.
+ */
+typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
+		uint16_t obj_idx, /* port or queue id */
+		enum xstats_type stat, int extra_arg);
+
+/* One entry in the flat xstats table built by sw_xstats_init() */
+struct sw_xstats_entry {
+	struct rte_event_dev_xstats_name name;
+	xstats_fn fn;
+	uint16_t obj_idx;
+	enum xstats_type stat;
+	enum rte_event_dev_xstats_mode mode;
+	int extra_arg;
+};
+
+/* Fetch a device-level statistic. obj_idx and extra_arg are unused for
+ * device scope. Returns (uint64_t)-1 for a stat type not valid here.
+ */
+static uint64_t
+get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
+		enum xstats_type type, int extra_arg __rte_unused)
+{
+	switch (type) {
+	case rx: return sw->stats.rx_pkts;
+	case tx: return sw->stats.tx_pkts;
+	case dropped: return sw->stats.rx_dropped;
+	case calls: return sw->sched_called;
+	case no_iq_enq: return sw->sched_no_iq_enqueues;
+	case no_cq_enq: return sw->sched_no_cq_enqueues;
+	default: return -1;
+	}
+}
+
+/* Fetch a per-port statistic for port obj_idx; extra_arg is unused.
+ * rx_*\/tx_* ring values report live occupancy of the port's worker
+ * rings, not accumulated counters. Returns (uint64_t)-1 for stat types
+ * not valid at port scope.
+ */
+static uint64_t
+get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+		enum xstats_type type, int extra_arg __rte_unused)
+{
+	const struct sw_port *p = &sw->ports[obj_idx];
+
+	switch (type) {
+	case rx: return p->stats.rx_pkts;
+	case tx: return p->stats.tx_pkts;
+	case dropped: return p->stats.rx_dropped;
+	case inflight: return p->inflights;
+	case pkt_cycles: return p->avg_pkt_ticks;
+	case calls: return p->total_polls;
+	case credits: return p->inflight_credits;
+	case poll_return: return p->zero_polls;
+	case rx_used: return qe_ring_count(p->rx_worker_ring);
+	case rx_free: return qe_ring_free_count(p->rx_worker_ring);
+	case tx_used: return qe_ring_count(p->cq_worker_ring);
+	case tx_free: return qe_ring_free_count(p->cq_worker_ring);
+	default: return -1;
+	}
+}
+
+/* Fetch one dequeue-burst-size histogram bucket for port obj_idx;
+ * extra_arg selects the bucket index (set up by sw_xstats_init()).
+ */
+static uint64_t
+get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+		enum xstats_type type, int extra_arg)
+{
+	const struct sw_port *p = &sw->ports[obj_idx];
+
+	switch (type) {
+	case poll_return: return p->poll_buckets[extra_arg];
+	default: return -1;
+	}
+}
+
+/* Fetch a per-queue (qid) statistic for queue obj_idx; extra_arg unused.
+ * 'inflight' is computed on demand by summing the packet count of every
+ * flow entry in the qid, so it is O(RTE_DIM(qid->fids)) per call.
+ * 'iq_size' is the (fixed) capacity of an internal queue ring.
+ */
+static uint64_t
+get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+		enum xstats_type type, int extra_arg __rte_unused)
+{
+	const struct sw_qid *qid = &sw->qids[obj_idx];
+
+	switch (type) {
+	case rx: return qid->stats.rx_pkts;
+	case tx: return qid->stats.tx_pkts;
+	case dropped: return qid->stats.rx_dropped;
+	case inflight:
+		do {
+			uint64_t infl = 0;
+			unsigned int i;
+			for (i = 0; i < RTE_DIM(qid->fids); i++)
+				infl += qid->fids[i].pcount;
+			return infl;
+		} while (0);
+		break;
+	case iq_size: return RTE_DIM(qid->iq[0]->ring);
+	default: return -1;
+	}
+}
+
+/* Fetch the current occupancy of one internal queue (iq) of qid
+ * obj_idx; extra_arg selects the iq priority level.
+ */
+static uint64_t
+get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+		enum xstats_type type, int extra_arg)
+{
+	const struct sw_qid *qid = &sw->qids[obj_idx];
+	const int iq_idx = extra_arg;
+
+	switch (type) {
+	case iq_used: return iq_ring_count(qid->iq[iq_idx]);
+	default: return -1;
+	}
+}
+
+/* Count the flows of qid obj_idx currently pinned to a given port;
+ * extra_arg is the port id to match. O(RTE_DIM(qid->fids)) per call.
+ */
+static uint64_t
+get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+		enum xstats_type type, int extra_arg)
+{
+	const struct sw_qid *qid = &sw->qids[obj_idx];
+	uint16_t port = extra_arg;
+
+	switch (type) {
+	case pinned:
+		do {
+			uint64_t pin = 0;
+			unsigned int i;
+			for (i = 0; i < RTE_DIM(qid->fids); i++)
+				if (qid->fids[i].cq == port)
+					pin++;
+			return pin;
+		} while (0);
+		break;
+	default: return -1;
+	}
+}
+
+/* Build the flat table of xstats entries for the device: device-level,
+ * per-port, per-port-burst-bucket, per-qid, per-iq and per-qid-per-port
+ * stats. Returns the number of stats created (also stored in
+ * sw->xstats_count), or -ENOMEM on allocation failure.
+ */
+int
+sw_xstats_init(struct sw_evdev *sw)
+{
+	/*
+	 * define the stats names and types. Used to build up the device
+	 * xstats array
+	 * There are multiple set of stats:
+	 *   - device-level,
+	 *   - per-port,
+	 *   - per-port-dequeue-burst-sizes
+	 *   - per-qid,
+	 *   - per-iq
+	 *   - per-port-per-qid
+	 *
+	 * For each of these sets, we have two parallel arrays, one for the
+	 * names, the other for the stat type parameter to be passed in the fn
+	 * call to get that stat. These two arrays must be kept in sync
+	 */
+	static const char * const dev_stats[] = { "rx", "tx", "drop",
+			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+	};
+	static const enum xstats_type dev_types[] = { rx, tx, dropped,
+			calls, no_iq_enq, no_cq_enq,
+	};
+
+	static const char * const port_stats[] = {"rx", "tx", "drop",
+			"inflight", "avg_pkt_cycles", "credits",
+			"rx_ring_used", "rx_ring_free",
+			"cq_ring_used", "cq_ring_free",
+			"dequeue_calls", "dequeues_returning_0",
+	};
+	static const enum xstats_type port_types[] = { rx, tx, dropped,
+			inflight, pkt_cycles, credits,
+			rx_used, rx_free, tx_used, tx_free,
+			calls, poll_return,
+	};
+
+	static const char * const port_bucket_stats[] = {
+			"dequeues_returning" };
+	static const enum xstats_type port_bucket_types[] = { poll_return };
+
+	static const char * const qid_stats[] = {"rx", "tx", "drop",
+			"inflight", "iq_size"
+	};
+	static const enum xstats_type qid_types[] = { rx, tx, dropped, inflight,
+			iq_size
+	};
+
+	static const char * const qid_iq_stats[] = { "used" };
+	static const enum xstats_type qid_iq_types[] = { iq_used };
+
+	static const char * const qid_port_stats[] = { "pinned_flows" };
+	static const enum xstats_type qid_port_types[] = { pinned };
+	/* ---- end of stat definitions ---- */
+
+	/* check sizes, since a missed comma can lead to strings being
+	 * joined by the compiler.
+	 */
+	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
+	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
+	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
+	RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
+	RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
+	RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
+			RTE_DIM(port_bucket_types));
+
+	/* Number of dequeue-burst histogram buckets per port at the maximum
+	 * consumer queue depth. Renamed from "cons_bkt_shift": the value is
+	 * a bucket count, not a shift amount.
+	 */
+	const uint32_t cons_bkt_count =
+		(MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
+	/* Upper bound on the number of entries; the fill loop below uses
+	 * each port's actual cq ring size for its bucket count, which is
+	 * assumed to be <= MAX_SW_CONS_Q_DEPTH - TODO confirm.
+	 */
+	const unsigned int count = RTE_DIM(dev_stats) +
+			sw->port_count * RTE_DIM(port_stats) +
+			sw->port_count * RTE_DIM(port_bucket_stats) *
+				(cons_bkt_count + 1) +
+			sw->qid_count * RTE_DIM(qid_stats) +
+			sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
+			sw->qid_count * sw->port_count *
+				RTE_DIM(qid_port_stats);
+	unsigned int i, port, qid, iq, bkt, stat = 0;
+
+	sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
+			sw->data->socket_id);
+	if (sw->xstats == NULL)
+		return -ENOMEM;
+
+#define sname sw->xstats[stat].name.name
+	for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
+		sw->xstats[stat] = (struct sw_xstats_entry){
+			.fn = get_dev_stat,
+			.stat = dev_types[i],
+			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
+		};
+		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
+	}
+	sw->xstats_count_mode_dev = stat;
+
+	for (port = 0; port < sw->port_count; port++) {
+		for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
+			sw->xstats[stat] = (struct sw_xstats_entry){
+				.fn = get_port_stat,
+				.obj_idx = port,
+				.stat = port_types[i],
+				.mode = RTE_EVENT_DEV_XSTATS_PORT,
+			};
+			snprintf(sname, sizeof(sname), "port_%u_%s",
+					port, port_stats[i]);
+		}
+
+		for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
+				SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+			for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
+				sw->xstats[stat] = (struct sw_xstats_entry){
+					.fn = get_port_bucket_stat,
+					.obj_idx = port,
+					.stat = port_bucket_types[i],
+					.mode = RTE_EVENT_DEV_XSTATS_PORT,
+					.extra_arg = bkt,
+				};
+				snprintf(sname, sizeof(sname),
+					"port_%u_%s_%u-%u",
+					port, port_bucket_stats[i],
+					(bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
+					(bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
+				stat++;
+			}
+		}
+	}
+
+	sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;
+
+	for (qid = 0; qid < sw->qid_count; qid++) {
+		for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
+			sw->xstats[stat] = (struct sw_xstats_entry){
+				.fn = get_qid_stat,
+				.obj_idx = qid,
+				.stat = qid_types[i],
+				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+			};
+			snprintf(sname, sizeof(sname), "qid_%u_%s",
+					qid, qid_stats[i]);
+		}
+		for (iq = 0; iq < SW_IQS_MAX; iq++)
+			for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
+				sw->xstats[stat] = (struct sw_xstats_entry){
+					.fn = get_qid_iq_stat,
+					.obj_idx = qid,
+					.stat = qid_iq_types[i],
+					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+					.extra_arg = iq
+				};
+				snprintf(sname, sizeof(sname),
+						"qid_%u_iq_%u_%s",
+						qid, iq,
+						qid_iq_stats[i]);
+			}
+
+		for (port = 0; port < sw->port_count; port++)
+			for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
+				sw->xstats[stat] = (struct sw_xstats_entry){
+					.fn = get_qid_port_stat,
+					.obj_idx = qid,
+					.stat = qid_port_types[i],
+					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+					.extra_arg = port
+				};
+				snprintf(sname, sizeof(sname),
+						"qid_%u_port_%u_%s",
+						qid, port,
+						qid_port_stats[i]);
+			}
+	}
+
+	sw->xstats_count_mode_queue = stat -
+		(sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
+#undef sname
+
+	sw->xstats_count = stat;
+
+	return stat;
+}
+
+/* Release the xstats table built by sw_xstats_init().
+ * Always returns 0; rte_free(NULL) is a no-op so repeated calls are
+ * harmless once the pointer is cleared below.
+ */
+int
+sw_xstats_uninit(struct sw_evdev *sw)
+{
+	rte_free(sw->xstats);
+	/* NULL the pointer so a second stop/uninit cannot double-free */
+	sw->xstats = NULL;
+	sw->xstats_count = 0;
+	return 0;
+}
+
+/* Report the number of xstats available for @mode and copy up to @size
+ * names into @xstats_names. If @size is too small, the required count is
+ * returned and nothing is written (eventdev xstats contract).
+ * NOTE(review): queue_port_id is currently ignored - names for ALL
+ * queues/ports of the given mode are returned, not just the requested
+ * object's. Confirm against the eventdev API expectations.
+ */
+int
+sw_xstats_get_names(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		struct rte_event_dev_xstats_name *xstats_names,
+		unsigned int size)
+{
+	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+	unsigned int i;
+	unsigned int xidx = 0;
+	RTE_SET_USED(queue_port_id);
+
+	uint32_t xstats_mode_count = 0;
+
+	switch (mode) {
+	case RTE_EVENT_DEV_XSTATS_DEVICE:
+		xstats_mode_count = sw->xstats_count_mode_dev;
+		break;
+	case RTE_EVENT_DEV_XSTATS_PORT:
+		xstats_mode_count = sw->xstats_count_mode_port;
+		break;
+	case RTE_EVENT_DEV_XSTATS_QUEUE:
+		xstats_mode_count = sw->xstats_count_mode_queue;
+		break;
+	default:
+		SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
+		return -EINVAL;
+	}
+
+	if (xstats_mode_count > size)
+		return xstats_mode_count;
+
+	for (i = 0; i < sw->xstats_count && xidx < size; i++) {
+		if (sw->xstats[i].mode != mode)
+			continue;
+		xstats_names[xidx] = sw->xstats[i].name;
+		xidx++;
+	}
+	return xstats_mode_count;
+}
+
+/* Retrieve values for the xstats identified by @ids. Entries whose id is
+ * out of range or whose mode does not match are skipped. Returns the
+ * number of values written, or the required count if @n is too small.
+ * NOTE(review): queue_port_id is currently ignored (see get_names).
+ */
+int
+sw_xstats_get(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+	unsigned int i;
+	unsigned int xidx = 0;
+	RTE_SET_USED(queue_port_id);
+
+	uint32_t xstats_mode_count = 0;
+
+	switch (mode) {
+	case RTE_EVENT_DEV_XSTATS_DEVICE:
+		xstats_mode_count = sw->xstats_count_mode_dev;
+		break;
+	case RTE_EVENT_DEV_XSTATS_PORT:
+		xstats_mode_count = sw->xstats_count_mode_port;
+		break;
+	case RTE_EVENT_DEV_XSTATS_QUEUE:
+		xstats_mode_count = sw->xstats_count_mode_queue;
+		break;
+	default:
+		SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
+		return -EINVAL;
+	}
+
+	if (xstats_mode_count > n)
+		return xstats_mode_count;
+
+	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
+		/* Validate the id before touching the xstats array. The
+		 * previous '>' comparison let ids[i] == xstats_count
+		 * through, reading one element past the end of the table.
+		 */
+		if (ids[i] >= sw->xstats_count)
+			continue;
+		struct sw_xstats_entry *xs = &sw->xstats[ids[i]];
+		if (xs->mode != mode)
+			continue;
+		values[xidx] = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg);
+		xidx++;
+	}
+
+	return xidx;
+}
+
+/* Look up a single stat by name via linear scan over the xstats table.
+ * On a match, optionally stores the table index in *id and returns the
+ * stat's current value. On a miss, sets *id to (uint32_t)-1 and returns
+ * (uint64_t)-1 - note -1 is thus ambiguous with a real stat value.
+ */
+uint64_t
+sw_xstats_get_by_name(const struct rte_eventdev *dev,
+		const char *name, unsigned int *id)
+{
+	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+	unsigned int i;
+
+	for (i = 0; i < sw->xstats_count; i++) {
+		struct sw_xstats_entry *xs = &sw->xstats[i];
+		if (strncmp(xs->name.name, name,
+				RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
+			if (id != NULL)
+				*id = i;
+			return xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg);
+		}
+	}
+	if (id != NULL)
+		*id = (uint32_t)-1;
+	return (uint64_t)-1;
+}
+
+/* Zero all accumulated software counters exposed through xstats.
+ * Live state (port inflights, ring occupancy, iq usage, pinned-flow
+ * counts) is intentionally untouched - those reflect current queue
+ * state, not counters. Always returns 0.
+ * NOTE(review): counters are cleared non-atomically while workers may
+ * be running; concurrent increments can be lost - likely acceptable
+ * for statistics, but worth confirming.
+ */
+int
+sw_xstats_reset(struct rte_eventdev *dev)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	uint32_t i;
+
+	/* instance stats */
+	sw->stats.rx_pkts = 0;
+	sw->stats.tx_pkts = 0;
+	sw->stats.rx_dropped = 0;
+	sw->sched_called = 0;
+	sw->sched_cq_qid_called = 0;
+	sw->sched_no_iq_enqueues = 0;
+	sw->sched_no_cq_enqueues = 0;
+
+	for (i = 0; i < sw->port_count; i++) {
+		struct sw_port *p = &sw->ports[i];
+		p->stats.rx_pkts = 0;
+		p->stats.tx_pkts = 0;
+		p->stats.rx_dropped = 0;
+		p->avg_pkt_ticks = 0;
+		p->total_polls = 0;
+		p->zero_polls = 0;
+
+		memset(p->poll_buckets, 0, sizeof(p->poll_buckets));
+	}
+
+	for (i = 0; i < sw->qid_count; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+		qid->stats.rx_pkts = 0;
+		qid->stats.tx_pkts = 0;
+		qid->stats.rx_dropped = 0;
+	}
+
+	return 0;
+}
-- 
2.7.4



More information about the dev mailing list