[dpdk-dev] [PATCH v4 11/20] net/virtio: add support for event suppression

Jens Freimann jfreimann at redhat.com
Thu Apr 19 09:07:42 CEST 2018


Signed-off-by: Jens Freimann <jfreimann at redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c |  2 +-
 drivers/net/virtio/virtio_rxtx.c   | 15 +++++++-
 drivers/net/virtio/virtqueue.h     | 77 ++++++++++++++++++++++++++++++++++++--
 3 files changed, 89 insertions(+), 5 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f9af3fcdb..30c04aa19 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -802,7 +802,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
 	struct virtqueue *vq = rxvq->vq;
 
-	virtqueue_enable_intr(vq);
+	virtqueue_enable_intr(vq, 0, 0);
 	return 0;
 }
 
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 42b1d5997..a6b24ea64 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -128,6 +128,10 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		rte_smp_wmb();
 		_set_desc_avail(&desc[head_idx], wrap_counter);
 		vq->vq_descx[head_idx].ndescs = descs_used;
+		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+			virtqueue_notify(vq);
+			PMD_TX_LOG(DEBUG, "Notified"); /* TX path: use TX log macro */
+		}
 	}
 
 	txvq->stats.packets += i;
@@ -1003,6 +1007,10 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 
 	rxvq->stats.packets += nb_rx;
+	if (nb_rx > 0 && unlikely(virtqueue_kick_prepare_packed(vq))) {
+		virtqueue_notify(vq);
+		PMD_RX_LOG(DEBUG, "Notified");
+	}
 
 	vq->vq_used_cons_idx = used_idx;
 
@@ -1280,8 +1288,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 
 	rxvq->stats.packets += nb_rx;
 
-	if (vtpci_packed_queue(vq->hw))
+	if (vtpci_packed_queue(vq->hw)) {
+		/* packed ring: split-ring kick_prepare reads used->flags,
+		 * which is not valid here - use the packed variant */
+		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+			virtqueue_notify(vq);
+			PMD_RX_LOG(DEBUG, "Notified");
+		}
 		return nb_rx;
+	}
 
 	/* Allocate new mbuf for the used descriptor */
 	error = ENOSPC;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 0df845e0b..96152ac76 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -176,6 +176,8 @@ struct virtqueue {
 	uint16_t vq_free_cnt;  /**< num of desc available */
 	uint16_t vq_avail_idx; /**< sync until needed */
 	uint16_t vq_free_thresh; /**< free threshold */
+	uint16_t vq_signalled_avail;
+	int vq_signalled_avail_valid;
 
 	void *vq_ring_virt_mem;  /**< linear address of vring*/
 	unsigned int vq_ring_size;
@@ -292,16 +294,37 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
 static inline void
 virtqueue_disable_intr(struct virtqueue *vq)
 {
-	vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+	if (vtpci_packed_queue(vq->hw))
+		/* packed ring: suppression always lives in the device event
+		 * area; there is no split-ring avail->flags to fall back to */
+		vq->vq_ring.device_event->desc_event_flags =
+			RING_EVENT_FLAGS_DISABLE;
+	else
+		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 }
 
 /**
  * Tell the backend to interrupt us.
  */
 static inline void
-virtqueue_enable_intr(struct virtqueue *vq)
+virtqueue_enable_intr(struct virtqueue *vq, uint16_t off, uint16_t wrap_counter)
 {
-	vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+	uint16_t *flags = &vq->vq_ring.device_event->desc_event_flags;
+	uint16_t *event_off_wrap =
+		&vq->vq_ring.device_event->desc_event_off_wrap;
+
+	if (vtpci_packed_queue(vq->hw)) {
+		*flags = 0;
+		*event_off_wrap = 0;
+		/* per-descriptor events only when EVENT_IDX was negotiated */
+		if (vtpci_with_feature(vq->hw, VIRTIO_RING_F_EVENT_IDX)) {
+			/* bits 0-14: descriptor offset, bit 15: wrap counter */
+			*event_off_wrap = off & 0x7FFF;
+			*event_off_wrap |= wrap_counter << 15;
+			*flags |= RING_EVENT_FLAGS_DESC;
+		}
+		*flags |= RING_EVENT_FLAGS_ENABLE;
+	} else {
+		vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+	}
 }
 
 /**
@@ -361,12 +384,60 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
 	vq->vq_avail_idx++;
 }
 
+static inline int vhost_idx_diff(struct virtqueue *vq, uint16_t old, uint16_t new)
+{
+	if (new >= old) /* >= so equal indexes yield 0, not vq_nentries */
+		return new - old;
+	return (new + vq->vq_nentries - old);
+}
+
+static inline int vring_packed_need_event(struct virtqueue *vq,
+		uint16_t event_off, uint16_t new,
+		uint16_t old)
+{	/* inline: header-defined static; avoids per-TU copies/warnings */
+	return (uint16_t)(vhost_idx_diff(vq, new, event_off) - 1) <
+		(uint16_t)vhost_idx_diff(vq, new, old);
+}
+
+
 static inline int
 virtqueue_kick_prepare(struct virtqueue *vq)
 {
 	return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
+{
+	uint16_t notify_offset, flags, wrap;
+	uint16_t old, new;
+	int v;
+
+	if (vtpci_packed_queue(vq->hw)) {
+		flags = vq->vq_ring.device_event->desc_event_flags;
+		if (!(flags & RING_EVENT_FLAGS_DESC))
+			return flags & RING_EVENT_FLAGS_ENABLE;
+		virtio_rmb();
+		notify_offset = vq->vq_ring.device_event->desc_event_off_wrap;
+		wrap = notify_offset >> 15; /* bit 15: wrap counter */
+		notify_offset &= 0x7FFF; /* bits 0-14: descriptor offset */
+
+		old = vq->vq_signalled_avail;
+		v = vq->vq_signalled_avail_valid;
+		new = vq->vq_avail_idx;
+		vq->vq_signalled_avail = vq->vq_avail_idx;
+		vq->vq_signalled_avail_valid = 1;
+
+		if (unlikely(!v))
+			return 0;
+
+		/* argument order must match (vq, event_off, new, old) */
+		return (vring_packed_need_event(vq, notify_offset, new, old) &&
+			wrap == vq->vq_ring.avail_wrap_counter);
+	} else {
+		return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+	}
+}
+
 static inline void
 virtqueue_notify(struct virtqueue *vq)
 {
-- 
2.14.3



More information about the dev mailing list