[RFC,07/13] add vhost dequeue shadow descs update function

Message ID 20190708171320.38802-8-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series [RFC,01/13] add vhost normal enqueue function

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  fail      Compilation issues

Commit Message

Marvin Liu July 8, 2019, 5:13 p.m. UTC
  The vhost dequeue function buffers as many used descriptors as possible
before updating the used ring, so each shadow used element must carry its
own descriptor index and wrap counter. The ring index of the first shadowed
entry is also recorded, in the new dequeue_shadow_head field.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
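
To illustrate why each shadow element records its own used ring slot and wrap
counter, below is a minimal sketch of how a flush path could later replay the
buffered entries to the packed used ring. The flush_dequeue_shadow_packed()
name and the exact write ordering shown are assumptions for illustration only;
the actual flush logic is not part of this patch.

static __rte_always_inline void
flush_dequeue_shadow_packed(struct vhost_virtqueue *vq)
{
	struct vring_used_elem_packed *elem;
	uint16_t head_flags = 0;
	uint16_t flags;
	uint16_t i;

	for (i = 0; i < vq->shadow_used_idx; i++) {
		elem = &vq->shadow_used_packed[i];

		vq->desc_packed[elem->used_idx].id = elem->id;
		vq->desc_packed[elem->used_idx].len = elem->len;

		/* Derive the AVAIL/USED flags from the wrap counter stored
		 * with this entry, not from the current counter, since the
		 * buffered batch may span a ring wrap.
		 */
		flags = elem->used_wrap_counter ?
			VIRTIO_TX_FLAG_PACKED : VIRTIO_TX_WRAP_FLAG_PACKED;

		if (elem->used_idx == vq->dequeue_shadow_head)
			head_flags = flags;
		else
			vq->desc_packed[elem->used_idx].flags = flags;
	}

	/* Expose the first shadowed slot last, after a write barrier, so the
	 * guest never observes a partially updated batch.
	 */
	rte_smp_wmb();
	vq->desc_packed[vq->dequeue_shadow_head].flags = head_flags;

	vq->shadow_used_idx = 0;
}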
  

Patch

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index d084fe364..5ccbe67b5 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -42,6 +42,8 @@ 
 /* Pre-calculated packed ring flags */
 #define VIRTIO_RX_FLAG_PACKED  (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE)
 #define VIRTIO_RX_WRAP_FLAG_PACKED (VRING_DESC_F_WRITE)
+#define VIRTIO_TX_FLAG_PACKED  (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED)
+#define VIRTIO_TX_WRAP_FLAG_PACKED (0x0)
 
 /* Used in fast packed ring functions */
 #define PACKED_DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_packed_desc))
@@ -93,9 +95,11 @@  struct log_cache_entry {
 };
 
 struct vring_used_elem_packed {
+	uint16_t used_idx;
 	uint16_t id;
 	uint32_t len;
 	uint32_t count;
+	uint16_t used_wrap_counter;
 };
 
 /**
@@ -150,6 +154,7 @@  struct vhost_virtqueue {
 	};
 	uint16_t                shadow_used_idx;
 	uint16_t                enqueue_shadow_count;
+	uint16_t                dequeue_shadow_head;
 	struct vhost_vring_addr ring_addrs;
 
 	struct batch_copy_elem	*batch_copy_elems;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 9eeebe642..83ed2d599 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -299,6 +299,28 @@  update_enqueue_shadow_used_ring_packed(struct vhost_virtqueue *vq,
 	vq->enqueue_shadow_count += count;
 }
 
+static __rte_always_inline void
+update_dequeue_shadow_used_ring_packed(struct vhost_virtqueue *vq,
+				 uint16_t buf_id, uint16_t count)
+{
+	if (!vq->shadow_used_idx)
+		vq->dequeue_shadow_head = vq->last_used_idx;
+
+	uint16_t i = vq->shadow_used_idx++;
+
+	vq->shadow_used_packed[i].id  = buf_id;
+	vq->shadow_used_packed[i].len = 0;
+	vq->shadow_used_packed[i].count = count;
+	vq->shadow_used_packed[i].used_idx = vq->last_used_idx;
+	vq->shadow_used_packed[i].used_wrap_counter = vq->used_wrap_counter;
+
+	vq->last_used_idx += count;
+	if (vq->last_used_idx >= vq->size) {
+		vq->used_wrap_counter ^= 1;
+		vq->last_used_idx -= vq->size;
+	}
+}
+
 static __rte_always_inline void
 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
 			 uint16_t desc_idx, uint32_t len, uint16_t count)
@@ -1990,6 +2012,8 @@  virtio_dev_tx_normal_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		&desc_count))
 			return -1;
 
+	update_dequeue_shadow_used_ring_packed(vq, buf_id, desc_count);
+
 	vq->last_avail_idx += desc_count;
 	if (vq->last_avail_idx >= vq->size) {
 		vq->last_avail_idx -= vq->size;