[dpdk-dev] [PATCH] examples/vhost: add rate statistics for rx/tx and core

Jianfeng Tan jianfeng.tan at intel.com
Tue Dec 1 23:32:54 CET 2015


Currently, we only have aggregated statistics, which do not make it
obvious how fast rx/tx are running or how busy each core is.

This patch adds the rx/tx rate for each period set by the --stat
option. A simple core busy rate is also added, showing how many
polling rounds actually processed packets out of all rounds of
the loop.

Besides, this fixes a statistics error that occurs in the
software vm2vm forwarding case.

Signed-off-by: Jianfeng Tan <jianfeng.tan at intel.com>
Tested-by: Qian Xu <qian.q.xu at intel.com>
---
 examples/vhost/main.c | 150 +++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 119 insertions(+), 31 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 9bfda6d..8ce0668 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -310,14 +310,39 @@ struct ipv4_hdr {
 #define VLAN_HLEN       4
 #define VLAN_ETH_HLEN   18
 
+/* Per-core statistics struct */
+struct core_statistics {
+	/* write-only by corresponding datapath thread */
+	uint64_t total_rounds;
+	uint64_t busy_rounds;
+
+	/* write-only by print-stats thread */
+	uint64_t total_rounds_p;
+	uint64_t busy_rounds_p;
+} __rte_cache_aligned;
+struct core_statistics core_statistics[RTE_MAX_LCORE];
+
 /* Per-device statistics struct */
 struct device_statistics {
-	uint64_t tx_total;
+	/* rx: from vhost to virtio; tx: from virtio to vhost */
+
+	/* write-only by datapath threads */
+	/* for non zero-copy case, pkts may be enqueued by any lcore */
 	rte_atomic64_t rx_total_atomic;
-	uint64_t rx_total;
-	uint64_t tx;
 	rte_atomic64_t rx_atomic;
+	/* for zero-copy case */
+	uint64_t rx_total;
 	uint64_t rx;
+	/* write-only by corresponding datapath thread */
+	uint64_t tx_total;
+	uint64_t tx;
+
+	/* write-only by print-stats threads */
+	uint64_t rx_total_p;
+	uint64_t rx_p;
+	uint64_t tx_total_p;
+	uint64_t tx_p;
+
 } __rte_cache_aligned;
 struct device_statistics dev_statistics[MAX_DEVICES];
 
@@ -1044,14 +1069,10 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 				/*send the packet to the local virtio device*/
 				ret = rte_vhost_enqueue_burst(tdev, VIRTIO_RXQ, &m, 1);
 				if (enable_stats) {
-					rte_atomic64_add(
-					&dev_statistics[tdev->device_fh].rx_total_atomic,
-					1);
-					rte_atomic64_add(
-					&dev_statistics[tdev->device_fh].rx_atomic,
-					ret);
-					dev_statistics[tdev->device_fh].tx_total++;
-					dev_statistics[tdev->device_fh].tx += ret;
+					rte_atomic64_add(&dev_statistics[tdev->device_fh].rx_total_atomic, 1);
+					rte_atomic64_add(&dev_statistics[tdev->device_fh].rx_atomic, ret);
+
+					dev_statistics[dev->device_fh].tx += ret;
 				}
 			}
 
@@ -1128,6 +1149,10 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 	struct virtio_net *dev = vdev->dev;
 	struct ether_hdr *nh;
 
+	/* from now on, this pkt may be sent to physical device or another virtio device */
+	if (enable_stats)
+		dev_statistics[dev->device_fh].tx_total++;
+
 	/*check if destination is local VM*/
 	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
 		rte_pktmbuf_free(m);
@@ -1182,10 +1207,12 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 
 	tx_q->m_table[len] = m;
 	len++;
-	if (enable_stats) {
-		dev_statistics[dev->device_fh].tx_total++;
+
+	/* We are going to lose the information of which device this
+	 * pkt comes from. So we suppose rte_eth_tx_burst() always succeeds.
+	 */
+	if (enable_stats)
 		dev_statistics[dev->device_fh].tx++;
-	}
 
 	if (unlikely(len == MAX_PKT_BURST)) {
 		m_table = (struct rte_mbuf **)tx_q->m_table;
@@ -1226,6 +1253,7 @@ switch_worker(__attribute__((unused)) void *arg)
 	uint16_t rx_count = 0;
 	uint16_t tx_count;
 	uint32_t retry = 0;
+	int busy_round;
 
 	RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
 	lcore_ll = lcore_info[lcore_id].lcore_ll;
@@ -1241,12 +1269,14 @@ switch_worker(__attribute__((unused)) void *arg)
 
 	while(1) {
 		cur_tsc = rte_rdtsc();
+		busy_round = 0;
 		/*
 		 * TX burst queue drain
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
 		if (unlikely(diff_tsc > drain_tsc)) {
 
+			busy_round |= tx_q->len;
 			if (tx_q->len) {
 				LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
 
@@ -1296,6 +1326,7 @@ switch_worker(__attribute__((unused)) void *arg)
 				rx_count = rte_eth_rx_burst(ports[0],
 					vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
 
+				busy_round |= rx_count;
 				if (rx_count) {
 					/*
 					* Retry is enabled and the queue is full then we wait and retry to avoid packet loss
@@ -1310,11 +1341,8 @@ switch_worker(__attribute__((unused)) void *arg)
 					}
 					ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_burst, rx_count);
 					if (enable_stats) {
-						rte_atomic64_add(
-						&dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic,
-						rx_count);
-						rte_atomic64_add(
-						&dev_statistics[dev_ll->vdev->dev->device_fh].rx_atomic, ret_count);
+						rte_atomic64_add(&dev_statistics[dev->device_fh].rx_total_atomic, rx_count);
+						rte_atomic64_add(&dev_statistics[dev->device_fh].rx_atomic, ret_count);
 					}
 					while (likely(rx_count)) {
 						rx_count--;
@@ -1327,6 +1355,8 @@ switch_worker(__attribute__((unused)) void *arg)
 			if (likely(!vdev->remove)) {
 				/* Handle guest TX*/
 				tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST);
+				busy_round |= tx_count;
+
 				/* If this is the first received packet we need to learn the MAC and setup VMDQ */
 				if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
 					if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
@@ -1341,6 +1371,10 @@ switch_worker(__attribute__((unused)) void *arg)
 			/*move to the next device in the list*/
 			dev_ll = dev_ll->next;
 		}
+		if (enable_stats) {
+			core_statistics[lcore_id].busy_rounds += !!busy_round;
+			core_statistics[lcore_id].total_rounds++;
+		}
 	}
 
 	return 0;
@@ -1856,10 +1890,8 @@ virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
 		mbuf->nb_segs,
 		(mbuf->next == NULL) ? "null" : "non-null");
 
-	if (enable_stats) {
-		dev_statistics[dev->device_fh].tx_total++;
+	if (enable_stats)
 		dev_statistics[dev->device_fh].tx++;
-	}
 
 	if (unlikely(len == MAX_PKT_BURST)) {
 		m_table = (struct rte_mbuf **)tx_q->m_table;
@@ -2792,6 +2824,7 @@ print_stats(void)
 	uint32_t device_fh;
 	const char clr[] = { 27, '[', '2', 'J', '\0' };
 	const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
+	unsigned lcore_id;
 
 	while(1) {
 		sleep(enable_stats);
@@ -2804,9 +2837,20 @@ print_stats(void)
 		dev_ll = ll_root_used;
 		while (dev_ll != NULL) {
 			device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
-			tx_total = dev_statistics[device_fh].tx_total;
-			tx = dev_statistics[device_fh].tx;
+
+			tx_total = dev_statistics[device_fh].tx_total -
+				dev_statistics[device_fh].tx_total_p;
+			tx = dev_statistics[device_fh].tx -
+				dev_statistics[device_fh].tx_p;
+			/* Because we do not use a lock to control the access of tx_total
+			 * and tx in dev_statistics, tx may be greater than tx_total. If
+			 * this happens, we'll count those redundant tx next time.
+			 * This applies to rx_total/rx and total_rounds/busy_rounds too.
+			 */
+			if (unlikely(tx_total < tx))
+				tx = tx_total;
 			tx_dropped = tx_total - tx;
+
 			if (zero_copy == 0) {
 				rx_total = rte_atomic64_read(
 					&dev_statistics[device_fh].rx_total_atomic);
@@ -2816,25 +2860,69 @@ print_stats(void)
 				rx_total = dev_statistics[device_fh].rx_total;
 				rx = dev_statistics[device_fh].rx;
 			}
+			rx_total -= dev_statistics[device_fh].rx_total_p;
+			rx -= dev_statistics[device_fh].rx_p;
+			if (unlikely(rx_total < rx))
+				rx = rx_total;
 			rx_dropped = rx_total - rx;
 
+			dev_statistics[device_fh].rx_total_p += rx_total;
+			dev_statistics[device_fh].rx_p += rx;
+			dev_statistics[device_fh].tx_total_p += tx_total;
+			dev_statistics[device_fh].tx_p += tx;
+
 			printf("\nStatistics for device %"PRIu32" ------------------------------"
-					"\nTX total: 		%"PRIu64""
-					"\nTX dropped: 		%"PRIu64""
-					"\nTX successful: 		%"PRIu64""
-					"\nRX total: 		%"PRIu64""
-					"\nRX dropped: 		%"PRIu64""
-					"\nRX successful: 		%"PRIu64"",
+					"\nRate:"
+					"\n\tTX total:\t\t\t%"PRIu64""
+					"\n\tTX dropped:\t\t\t%"PRIu64""
+					"\n\tTX successful:\t\t\t%"PRIu64""
+					"\n\tRX total:\t\t\t%"PRIu64""
+					"\n\tRX dropped:\t\t\t%"PRIu64""
+					"\n\tRX successful:\t\t\t%"PRIu64""
+					"\nAggregated statistics:"
+					"\n\tTX total:\t\t\t%"PRIu64""
+					"\n\tTX dropped:\t\t\t%"PRIu64""
+					"\n\tTX successful:\t\t\t%"PRIu64""
+					"\n\tRX total:\t\t\t%"PRIu64""
+					"\n\tRX dropped:\t\t\t%"PRIu64""
+					"\n\tRX successful:\t\t\t%"PRIu64"",
 					device_fh,
 					tx_total,
 					tx_dropped,
 					tx,
 					rx_total,
 					rx_dropped,
-					rx);
+					rx,
+					dev_statistics[device_fh].tx_total_p,
+					(dev_statistics[device_fh].tx_total_p
+						- dev_statistics[device_fh].tx_p),
+					dev_statistics[device_fh].tx_p,
+					dev_statistics[device_fh].rx_total_p,
+					(dev_statistics[device_fh].rx_total_p
+						- dev_statistics[device_fh].rx_p),
+					dev_statistics[device_fh].rx_p);
 
 			dev_ll = dev_ll->next;
 		}
+
+		printf("\n");
+		printf("\nCore statistics ======================================");
+		printf("\n\tCore ID: busy_rounds/total_rounds");
+		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+			uint64_t busy_rounds, total_rounds;
+			busy_rounds = core_statistics[lcore_id].busy_rounds -
+				core_statistics[lcore_id].busy_rounds_p;
+			total_rounds = core_statistics[lcore_id].total_rounds -
+				core_statistics[lcore_id].total_rounds_p;
+			if (unlikely(total_rounds < busy_rounds))
+				total_rounds = busy_rounds;
+			core_statistics[lcore_id].busy_rounds_p += busy_rounds;
+			core_statistics[lcore_id].total_rounds_p += total_rounds;
+
+			printf("\n\tCore %2u:%12"PRIu64"/%12"PRIu64,
+					lcore_id, busy_rounds, total_rounds);
+		}
+
 		printf("\n======================================================\n");
 	}
 }
-- 
2.1.4



More information about the dev mailing list