[dpdk-dev] [PATCH v7 4/8] vhost: rxtx: use queue id instead of constant ring index

Yuanhan Liu yuanhan.liu at linux.intel.com
Wed Oct 21 08:54:19 CEST 2015


On Tue, Oct 20, 2015 at 09:43:54PM -0700, Stephen Hemminger wrote:
> On Wed, 21 Oct 2015 11:48:10 +0800
> Yuanhan Liu <yuanhan.liu at linux.intel.com> wrote:
> 
> >  
> > +static inline int __attribute__((always_inline))
> > +is_valid_virt_queue_idx(uint32_t virtq_idx, int is_tx, uint32_t max_qp_idx)
> > +{
> > +	if ((is_tx ^ (virtq_idx & 0x1)) ||
> > +	    (virtq_idx >= max_qp_idx * VIRTIO_QNUM))
> > +		return 0;
> > +
> > +	return 1;
> > +}
> 
> minor nits:
>  * this doesn't need to be marked as always inline;
>    that is, as they say in English, "shooting a fly with a bazooka"
>  * prefer to just return the logical result rather than a conditional
>  * for booleans, prefer the <stdbool.h> type bool.
> 
> static bool
> is_valid_virt_queue_idx(uint32_t virtq_idx, bool is_tx, uint32_t max_qp_idx)
> {
> 	return !(is_tx ^ (virtq_idx & 1)) &&
> 		virtq_idx < max_qp_idx * VIRTIO_QNUM;
> }

Thanks for the review, and here you go:
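
Before the patch itself, a quick sanity sketch of the simplified check
(assuming two queue pairs, i.e. 4 rings, with even indexes being RX and
odd ones TX; expected results are in the comments):

	is_valid_virt_queue_idx(0, 0, 2);  /* RX ring 0       -> true  */
	is_valid_virt_queue_idx(1, 1, 2);  /* TX ring 1       -> true  */
	is_valid_virt_queue_idx(1, 0, 2);  /* wrong direction -> false */
	is_valid_virt_queue_idx(4, 1, 2);  /* out of range    -> false */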


-- >8 --
From 0be45f37c63c86e93b9caf751ceaeec0a4e66fa5 Mon Sep 17 00:00:00 2001
From: Changchun Ouyang <changchun.ouyang at intel.com>
Date: Wed, 16 Sep 2015 15:40:32 +0800
Subject: [PATCH] vhost: rxtx: use queue id instead of constant ring index

Do not use the constant ring indexes VIRTIO_RXQ and VIRTIO_TXQ anymore;
use the queue_id argument instead, which will be set to the proper
value for a specific queue once multiple queue support is enabled.

For now, callers still pass VIRTIO_RXQ or VIRTIO_TXQ as queue_id,
so nothing should break.
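
For instance (illustration only; dev, pkts, rx_count and mbuf_pool are
placeholder names), a caller such as the vhost example app keeps doing
something like:

	/* single queue pair: queue_id is still the fixed ring index */
	rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts, rx_count);
	rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST);

so behavior is unchanged until multiple queue pairs are configured.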

Signed-off-by: Changchun Ouyang <changchun.ouyang at intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu at linux.intel.com>
Acked-by: Flavio Leitner <fbl at sysclose.org>

---

v8: simplify is_valid_virt_queue_idx()

v7: commit title fix
---
 lib/librte_vhost/vhost_rxtx.c | 43 +++++++++++++++++++++++++++++--------------
 1 file changed, 29 insertions(+), 14 deletions(-)

diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 7026bfa..1ec8850 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -32,6 +32,7 @@
  */
 
 #include <stdint.h>
+#include <stdbool.h>
 #include <linux/virtio_net.h>
 
 #include <rte_mbuf.h>
@@ -42,6 +43,12 @@
 
 #define MAX_PKT_BURST 32
 
+static bool
+is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
+{
+	return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
+}
+
 /**
  * This function adds buffers to the virtio devices RX virtqueue. Buffers can
  * be received from the physical port or from another virtio device. A packet
@@ -68,12 +75,14 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	uint8_t success = 0;
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
-	if (unlikely(queue_id != VIRTIO_RXQ)) {
-		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
+			__func__, dev->device_fh, queue_id);
 		return 0;
 	}
 
-	vq = dev->virtqueue[VIRTIO_RXQ];
+	vq = dev->virtqueue[queue_id];
 	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
 
 	/*
@@ -235,8 +244,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 }
 
 static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
-	uint16_t res_end_idx, struct rte_mbuf *pkt)
+copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
+			uint16_t res_base_idx, uint16_t res_end_idx,
+			struct rte_mbuf *pkt)
 {
 	uint32_t vec_idx = 0;
 	uint32_t entry_success = 0;
@@ -264,7 +274,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
 	 * Convert from gpa to vva
 	 * (guest physical addr -> vhost virtual addr)
 	 */
-	vq = dev->virtqueue[VIRTIO_RXQ];
+	vq = dev->virtqueue[queue_id];
 	vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
 	vb_hdr_addr = vb_addr;
 
@@ -464,11 +474,14 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
 		dev->device_fh);
-	if (unlikely(queue_id != VIRTIO_RXQ)) {
-		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
+			__func__, dev->device_fh, queue_id);
+		return 0;
 	}
 
-	vq = dev->virtqueue[VIRTIO_RXQ];
+	vq = dev->virtqueue[queue_id];
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 
 	if (count == 0)
@@ -509,8 +522,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 							res_cur_idx);
 		} while (success == 0);
 
-		entry_success = copy_from_mbuf_to_vring(dev, res_base_idx,
-			res_cur_idx, pkts[pkt_idx]);
+		entry_success = copy_from_mbuf_to_vring(dev, queue_id,
+			res_base_idx, res_cur_idx, pkts[pkt_idx]);
 
 		rte_compiler_barrier();
 
@@ -562,12 +575,14 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 	uint16_t free_entries, entry_success = 0;
 	uint16_t avail_idx;
 
-	if (unlikely(queue_id != VIRTIO_TXQ)) {
-		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
+			__func__, dev->device_fh, queue_id);
 		return 0;
 	}
 
-	vq = dev->virtqueue[VIRTIO_TXQ];
+	vq = dev->virtqueue[queue_id];
 	avail_idx =  *((volatile uint16_t *)&vq->avail->idx);
 
 	/* If there are no available buffers then return. */
-- 
1.9.0


