[dpdk-dev,v3,20/21] net/virtio: add support for event suppression
Checks
Commit Message
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 2 +-
drivers/net/virtio/virtio_ethdev.h | 2 +-
drivers/net/virtio/virtio_rxtx.c | 15 +++++++-
drivers/net/virtio/virtqueue.h | 73 ++++++++++++++++++++++++++++++++++++--
4 files changed, 86 insertions(+), 6 deletions(-)
Comments
On 04/05/2018 12:10 PM, Jens Freimann wrote:
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 2 +-
> drivers/net/virtio/virtio_ethdev.h | 2 +-
> drivers/net/virtio/virtio_rxtx.c | 15 +++++++-
> drivers/net/virtio/virtqueue.h | 73 ++++++++++++++++++++++++++++++++++++--
> 4 files changed, 86 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index a3c3376d7..65a6a9d89 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -727,7 +727,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
> struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
> struct virtqueue *vq = rxvq->vq;
>
> - virtqueue_enable_intr(vq);
> + virtqueue_enable_intr(vq, 0, 0);
> return 0;
> }
>
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index 3aeced4bb..19d3f2617 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -37,7 +37,7 @@
> 1u << VIRTIO_RING_F_INDIRECT_DESC | \
> 1ULL << VIRTIO_F_VERSION_1 | \
> 1ULL << VIRTIO_F_RING_PACKED | \
> - 1ULL << VIRTIO_F_IOMMU_PLATFORM)
> + 1ULL << VIRTIO_RING_F_EVENT_IDX)
>
> #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
> (VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index a48ca6aaa..ed65434ce 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -127,6 +127,10 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>
> rte_smp_wmb();
> _set_desc_avail(&desc[head_idx], wrap_counter);
> + if (unlikely(virtqueue_kick_prepare_packed(vq))) {
> + virtqueue_notify(vq);
> + PMD_RX_LOG(DEBUG, "Notified");
> + }
> }
>
> txvq->stats.packets += i;
> @@ -998,6 +1002,10 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> }
>
> rxvq->stats.packets += nb_rx;
> + if (nb_rx > 0 && unlikely(virtqueue_kick_prepare_packed(vq))) {
> + virtqueue_notify(vq);
> + PMD_RX_LOG(DEBUG, "Notified");
> + }
>
> vq->vq_used_cons_idx = used_idx;
>
> @@ -1276,8 +1284,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>
> rxvq->stats.packets += nb_rx;
>
> - if (vtpci_packed_queue(vq->hw))
> + if (vtpci_packed_queue(vq->hw)) {
> + if (unlikely(virtqueue_kick_prepare(vq))) {
> + virtqueue_notify(vq);
> + PMD_RX_LOG(DEBUG, "Notified");
> + }
> return nb_rx;
> + }
>
> /* Allocate new mbuf for the used descriptor */
> error = ENOSPC;
> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
> index 7196bd717..6fd3317d2 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -176,6 +176,8 @@ struct virtqueue {
> uint16_t vq_free_cnt; /**< num of desc available */
> uint16_t vq_avail_idx; /**< sync until needed */
> uint16_t vq_free_thresh; /**< free threshold */
> + uint16_t vq_signalled_avail;
> + int vq_signalled_avail_valid;
>
> void *vq_ring_virt_mem; /**< linear address of vring*/
> unsigned int vq_ring_size;
> @@ -273,16 +275,34 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
> static inline void
> virtqueue_disable_intr(struct virtqueue *vq)
> {
> - vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
> + if (vtpci_packed_queue(vq->hw) && vtpci_with_feature(vq->hw,
> + VIRTIO_RING_F_EVENT_IDX))
> + vq->vq_ring.device_event->desc_event_flags = RING_EVENT_FLAGS_DISABLE;
> + else
> + vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
> }
>
> /**
> * Tell the backend to interrupt us.
> */
> static inline void
> -virtqueue_enable_intr(struct virtqueue *vq)
> +virtqueue_enable_intr(struct virtqueue *vq, uint16_t off, uint16_t wrap_counter)
> {
> - vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
> + uint16_t *flags = &vq->vq_ring.device_event->desc_event_flags;
> + uint16_t *event_off_wrap = &vq->vq_ring.device_event->desc_event_off_wrap;
> + if (vtpci_packed_queue(vq->hw)) {
> + *flags = 0;
> + *event_off_wrap = 0;
> + if (*event_off_wrap & RING_EVENT_FLAGS_DESC) {
> + *event_off_wrap = off | 0x7FFF;
> + *event_off_wrap |= wrap_counter << 15;
> + *flags |= RING_EVENT_FLAGS_DESC;
> + } else
> + *event_off_wrap = 0;
> + *flags |= RING_EVENT_FLAGS_ENABLE;
> + } else {
> + vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
> + }
> }
>
> /**
> @@ -342,12 +362,59 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
> vq->vq_avail_idx++;
> }
>
> +static int vhost_idx_diff(struct virtqueue *vq, uint16_t old, uint16_t new)
> +{
> + if (new > old)
> + return new - old;
> + return (new + vq->vq_nentries - old);
> +}
> +
> +static int vring_packed_need_event(struct virtqueue *vq,
> + uint16_t event_off, uint16_t new,
> + uint16_t old)
> +{
> + return (uint16_t)(vhost_idx_diff(vq, new, event_off) - 1) <
> + (uint16_t)vhost_idx_diff(vq, new, old);
> +}
> +
> +
> static inline int
> virtqueue_kick_prepare(struct virtqueue *vq)
> {
> return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
> }
>
> +static inline int
> +virtqueue_kick_prepare_packed(struct virtqueue *vq)
> +{
> + uint16_t notify_offset, flags, wrap;
> + uint16_t old, new;
> + int v;
> +
> + if (vtpci_packed_queue(vq->hw)) {
> + flags = vq->vq_ring.device_event->desc_event_flags;
> + if (!(flags & RING_EVENT_FLAGS_DESC))
> + return flags & RING_EVENT_FLAGS_ENABLE;
> + virtio_rmb();
> + notify_offset = vq->vq_ring.device_event->desc_event_off_wrap;
> + wrap = notify_offset & 0x1;
> + notify_offset >>= 1;
> +
> + old = vq->vq_signalled_avail;
> + v = vq->vq_signalled_avail_valid;
> + new = vq->vq_signalled_avail = vq->vq_avail_idx;
> + vq->vq_signalled_avail_valid = 1;
> +
> + if (unlikely(!v))
> + return 0;
> +
> + return (vring_packed_need_event(vq, new, old, notify_offset) &&
> + wrap == vq->vq_ring.avail_wrap_counter);
> + } else {
> + return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
Maybe I didn't get it, but it looks confusing that the function name
contains "packed", yet it seems to handle the non-packed queue case as well.
> + }
> +}
> +
> static inline void
> virtqueue_notify(struct virtqueue *vq)
> {
>
On Fri, Apr 06, 2018 at 03:50:28PM +0200, Maxime Coquelin wrote:
>
>
>On 04/05/2018 12:10 PM, Jens Freimann wrote:
>>Signed-off-by: Jens Freimann <jfreimann@redhat.com>
>>---
>> drivers/net/virtio/virtio_ethdev.c | 2 +-
>> drivers/net/virtio/virtio_ethdev.h | 2 +-
>> drivers/net/virtio/virtio_rxtx.c | 15 +++++++-
>> drivers/net/virtio/virtqueue.h | 73 ++++++++++++++++++++++++++++++++++++--
>> 4 files changed, 86 insertions(+), 6 deletions(-)
>>
>>diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>>index a3c3376d7..65a6a9d89 100644
>>--- a/drivers/net/virtio/virtio_ethdev.c
>>+++ b/drivers/net/virtio/virtio_ethdev.c
>>@@ -727,7 +727,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
>> struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
>> struct virtqueue *vq = rxvq->vq;
>>- virtqueue_enable_intr(vq);
>>+ virtqueue_enable_intr(vq, 0, 0);
>> return 0;
>> }
>>diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
>>index 3aeced4bb..19d3f2617 100644
>>--- a/drivers/net/virtio/virtio_ethdev.h
>>+++ b/drivers/net/virtio/virtio_ethdev.h
>>@@ -37,7 +37,7 @@
>> 1u << VIRTIO_RING_F_INDIRECT_DESC | \
>> 1ULL << VIRTIO_F_VERSION_1 | \
>> 1ULL << VIRTIO_F_RING_PACKED | \
>>- 1ULL << VIRTIO_F_IOMMU_PLATFORM)
>>+ 1ULL << VIRTIO_RING_F_EVENT_IDX)
>> #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
>> (VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
>>diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
>>index a48ca6aaa..ed65434ce 100644
>>--- a/drivers/net/virtio/virtio_rxtx.c
>>+++ b/drivers/net/virtio/virtio_rxtx.c
>>@@ -127,6 +127,10 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>> rte_smp_wmb();
>> _set_desc_avail(&desc[head_idx], wrap_counter);
>>+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
>>+ virtqueue_notify(vq);
>>+ PMD_RX_LOG(DEBUG, "Notified");
>>+ }
>> }
>> txvq->stats.packets += i;
>>@@ -998,6 +1002,10 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
>> }
>> rxvq->stats.packets += nb_rx;
>>+ if (nb_rx > 0 && unlikely(virtqueue_kick_prepare_packed(vq))) {
>>+ virtqueue_notify(vq);
>>+ PMD_RX_LOG(DEBUG, "Notified");
>>+ }
>> vq->vq_used_cons_idx = used_idx;
>>@@ -1276,8 +1284,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>> rxvq->stats.packets += nb_rx;
>>- if (vtpci_packed_queue(vq->hw))
>>+ if (vtpci_packed_queue(vq->hw)) {
>>+ if (unlikely(virtqueue_kick_prepare(vq))) {
>>+ virtqueue_notify(vq);
>>+ PMD_RX_LOG(DEBUG, "Notified");
>>+ }
>> return nb_rx;
>>+ }
>> /* Allocate new mbuf for the used descriptor */
>> error = ENOSPC;
>>diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
>>index 7196bd717..6fd3317d2 100644
>>--- a/drivers/net/virtio/virtqueue.h
>>+++ b/drivers/net/virtio/virtqueue.h
>>@@ -176,6 +176,8 @@ struct virtqueue {
>> uint16_t vq_free_cnt; /**< num of desc available */
>> uint16_t vq_avail_idx; /**< sync until needed */
>> uint16_t vq_free_thresh; /**< free threshold */
>>+ uint16_t vq_signalled_avail;
>>+ int vq_signalled_avail_valid;
>> void *vq_ring_virt_mem; /**< linear address of vring*/
>> unsigned int vq_ring_size;
>>@@ -273,16 +275,34 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
>> static inline void
>> virtqueue_disable_intr(struct virtqueue *vq)
>> {
>>- vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
>>+ if (vtpci_packed_queue(vq->hw) && vtpci_with_feature(vq->hw,
>>+ VIRTIO_RING_F_EVENT_IDX))
>>+ vq->vq_ring.device_event->desc_event_flags = RING_EVENT_FLAGS_DISABLE;
>>+ else
>>+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
>> }
>> /**
>> * Tell the backend to interrupt us.
>> */
>> static inline void
>>-virtqueue_enable_intr(struct virtqueue *vq)
>>+virtqueue_enable_intr(struct virtqueue *vq, uint16_t off, uint16_t wrap_counter)
>> {
>>- vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
>>+ uint16_t *flags = &vq->vq_ring.device_event->desc_event_flags;
>>+ uint16_t *event_off_wrap = &vq->vq_ring.device_event->desc_event_off_wrap;
>>+ if (vtpci_packed_queue(vq->hw)) {
>>+ *flags = 0;
>>+ *event_off_wrap = 0;
>>+ if (*event_off_wrap & RING_EVENT_FLAGS_DESC) {
>>+ *event_off_wrap = off | 0x7FFF;
>>+ *event_off_wrap |= wrap_counter << 15;
>>+ *flags |= RING_EVENT_FLAGS_DESC;
>>+ } else
>>+ *event_off_wrap = 0;
>>+ *flags |= RING_EVENT_FLAGS_ENABLE;
>>+ } else {
>>+ vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
>>+ }
>> }
>> /**
>>@@ -342,12 +362,59 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
>> vq->vq_avail_idx++;
>> }
>>+static int vhost_idx_diff(struct virtqueue *vq, uint16_t old, uint16_t new)
>>+{
>>+ if (new > old)
>>+ return new - old;
>>+ return (new + vq->vq_nentries - old);
>>+}
>>+
>>+static int vring_packed_need_event(struct virtqueue *vq,
>>+ uint16_t event_off, uint16_t new,
>>+ uint16_t old)
>>+{
>>+ return (uint16_t)(vhost_idx_diff(vq, new, event_off) - 1) <
>>+ (uint16_t)vhost_idx_diff(vq, new, old);
>>+}
>>+
>>+
>> static inline int
>> virtqueue_kick_prepare(struct virtqueue *vq)
>> {
>> return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
>> }
>>+static inline int
>>+virtqueue_kick_prepare_packed(struct virtqueue *vq)
>>+{
>>+ uint16_t notify_offset, flags, wrap;
>>+ uint16_t old, new;
>>+ int v;
>>+
>>+ if (vtpci_packed_queue(vq->hw)) {
>>+ flags = vq->vq_ring.device_event->desc_event_flags;
>>+ if (!(flags & RING_EVENT_FLAGS_DESC))
>>+ return flags & RING_EVENT_FLAGS_ENABLE;
>>+ virtio_rmb();
>>+ notify_offset = vq->vq_ring.device_event->desc_event_off_wrap;
>>+ wrap = notify_offset & 0x1;
>>+ notify_offset >>= 1;
>>+
>>+ old = vq->vq_signalled_avail;
>>+ v = vq->vq_signalled_avail_valid;
>>+ new = vq->vq_signalled_avail = vq->vq_avail_idx;
>>+ vq->vq_signalled_avail_valid = 1;
>>+
>>+ if (unlikely(!v))
>>+ return 0;
>>+
>>+ return (vring_packed_need_event(vq, new, old, notify_offset) &&
>>+ wrap == vq->vq_ring.avail_wrap_counter);
>>+ } else {
>>+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
>
>Maybe I didn't get it, but it looks confusing that the function name
>contains "packed", yet it seems to handle the non-packed queue case as well.
No, the else part is not needed here. I'll remove it and make sure to
call the right function.
Thanks!
regards,
Jens
>
>>+ }
>>+}
>>+
>> static inline void
>> virtqueue_notify(struct virtqueue *vq)
>> {
>>
On Thu, Apr 05, 2018 at 12:10:30PM +0200, Jens Freimann wrote:
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 2 +-
> drivers/net/virtio/virtio_ethdev.h | 2 +-
> drivers/net/virtio/virtio_rxtx.c | 15 +++++++-
> drivers/net/virtio/virtqueue.h | 73 ++++++++++++++++++++++++++++++++++++--
> 4 files changed, 86 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index a3c3376d7..65a6a9d89 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -727,7 +727,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
> struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
> struct virtqueue *vq = rxvq->vq;
>
> - virtqueue_enable_intr(vq);
> + virtqueue_enable_intr(vq, 0, 0);
> return 0;
> }
>
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index 3aeced4bb..19d3f2617 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -37,7 +37,7 @@
> 1u << VIRTIO_RING_F_INDIRECT_DESC | \
> 1ULL << VIRTIO_F_VERSION_1 | \
> 1ULL << VIRTIO_F_RING_PACKED | \
> - 1ULL << VIRTIO_F_IOMMU_PLATFORM)
Why remove this feature bit?
> + 1ULL << VIRTIO_RING_F_EVENT_IDX)
Supporting event suppression doesn't mean supporting
F_EVENT_IDX. Event suppression is mandatory and not
a negotiable feature. F_EVENT_IDX is just an optional
enhancement for event suppression.
>
> #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
> (VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index a48ca6aaa..ed65434ce 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -127,6 +127,10 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>
> rte_smp_wmb();
> _set_desc_avail(&desc[head_idx], wrap_counter);
> + if (unlikely(virtqueue_kick_prepare_packed(vq))) {
> + virtqueue_notify(vq);
> + PMD_RX_LOG(DEBUG, "Notified");
The indent isn't right.
> + }
> }
[...]
On Sun, Apr 08, 2018 at 02:07:50PM +0800, Tiwei Bie wrote:
>On Thu, Apr 05, 2018 at 12:10:30PM +0200, Jens Freimann wrote:
>> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
>> ---
>> drivers/net/virtio/virtio_ethdev.c | 2 +-
>> drivers/net/virtio/virtio_ethdev.h | 2 +-
>> drivers/net/virtio/virtio_rxtx.c | 15 +++++++-
>> drivers/net/virtio/virtqueue.h | 73 ++++++++++++++++++++++++++++++++++++--
>> 4 files changed, 86 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>> index a3c3376d7..65a6a9d89 100644
>> --- a/drivers/net/virtio/virtio_ethdev.c
>> +++ b/drivers/net/virtio/virtio_ethdev.c
>> @@ -727,7 +727,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
>> struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
>> struct virtqueue *vq = rxvq->vq;
>>
>> - virtqueue_enable_intr(vq);
>> + virtqueue_enable_intr(vq, 0, 0);
>> return 0;
>> }
>>
>> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
>> index 3aeced4bb..19d3f2617 100644
>> --- a/drivers/net/virtio/virtio_ethdev.h
>> +++ b/drivers/net/virtio/virtio_ethdev.h
>> @@ -37,7 +37,7 @@
>> 1u << VIRTIO_RING_F_INDIRECT_DESC | \
>> 1ULL << VIRTIO_F_VERSION_1 | \
>> 1ULL << VIRTIO_F_RING_PACKED | \
>> - 1ULL << VIRTIO_F_IOMMU_PLATFORM)
>
>Why remove this feature bit?
my mistake. will fix it.
>
>
>> + 1ULL << VIRTIO_RING_F_EVENT_IDX)
>
>Supporting event suppression doesn't mean supporting
>F_EVENT_IDX. Event suppression is mandatory and not
>a negotiable feature. F_EVENT_IDX is just an optional
>enhancement for event suppression.
I understand, will fix that too.
>
>
>>
>> #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
>> (VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
>> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
>> index a48ca6aaa..ed65434ce 100644
>> --- a/drivers/net/virtio/virtio_rxtx.c
>> +++ b/drivers/net/virtio/virtio_rxtx.c
>> @@ -127,6 +127,10 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>>
>> rte_smp_wmb();
>> _set_desc_avail(&desc[head_idx], wrap_counter);
>> + if (unlikely(virtqueue_kick_prepare_packed(vq))) {
>> + virtqueue_notify(vq);
>> + PMD_RX_LOG(DEBUG, "Notified");
>
>The indent isn't right.
will fix. Thanks!
regards,
Jens
@@ -727,7 +727,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
struct virtqueue *vq = rxvq->vq;
- virtqueue_enable_intr(vq);
+ virtqueue_enable_intr(vq, 0, 0);
return 0;
}
@@ -37,7 +37,7 @@
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_RING_PACKED | \
- 1ULL << VIRTIO_F_IOMMU_PLATFORM)
+ 1ULL << VIRTIO_RING_F_EVENT_IDX)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
(VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
@@ -127,6 +127,10 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_smp_wmb();
_set_desc_avail(&desc[head_idx], wrap_counter);
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
}
txvq->stats.packets += i;
@@ -998,6 +1002,10 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
}
rxvq->stats.packets += nb_rx;
+ if (nb_rx > 0 && unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
vq->vq_used_cons_idx = used_idx;
@@ -1276,8 +1284,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxvq->stats.packets += nb_rx;
- if (vtpci_packed_queue(vq->hw))
+ if (vtpci_packed_queue(vq->hw)) {
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
return nb_rx;
+ }
/* Allocate new mbuf for the used descriptor */
error = ENOSPC;
@@ -176,6 +176,8 @@ struct virtqueue {
uint16_t vq_free_cnt; /**< num of desc available */
uint16_t vq_avail_idx; /**< sync until needed */
uint16_t vq_free_thresh; /**< free threshold */
+ uint16_t vq_signalled_avail;
+ int vq_signalled_avail_valid;
void *vq_ring_virt_mem; /**< linear address of vring*/
unsigned int vq_ring_size;
@@ -273,16 +275,34 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
- vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (vtpci_packed_queue(vq->hw) && vtpci_with_feature(vq->hw,
+ VIRTIO_RING_F_EVENT_IDX))
+ vq->vq_ring.device_event->desc_event_flags = RING_EVENT_FLAGS_DISABLE;
+ else
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
/**
* Tell the backend to interrupt us.
*/
static inline void
-virtqueue_enable_intr(struct virtqueue *vq)
+virtqueue_enable_intr(struct virtqueue *vq, uint16_t off, uint16_t wrap_counter)
{
- vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+ uint16_t *flags = &vq->vq_ring.device_event->desc_event_flags;
+ uint16_t *event_off_wrap = &vq->vq_ring.device_event->desc_event_off_wrap;
+ if (vtpci_packed_queue(vq->hw)) {
+ *flags = 0;
+ *event_off_wrap = 0;
+ if (*event_off_wrap & RING_EVENT_FLAGS_DESC) {
+ *event_off_wrap = off | 0x7FFF;
+ *event_off_wrap |= wrap_counter << 15;
+ *flags |= RING_EVENT_FLAGS_DESC;
+ } else
+ *event_off_wrap = 0;
+ *flags |= RING_EVENT_FLAGS_ENABLE;
+ } else {
+ vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+ }
}
/**
@@ -342,12 +362,59 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
vq->vq_avail_idx++;
}
+static int vhost_idx_diff(struct virtqueue *vq, uint16_t old, uint16_t new)
+{
+ if (new > old)
+ return new - old;
+ return (new + vq->vq_nentries - old);
+}
+
+static int vring_packed_need_event(struct virtqueue *vq,
+ uint16_t event_off, uint16_t new,
+ uint16_t old)
+{
+ return (uint16_t)(vhost_idx_diff(vq, new, event_off) - 1) <
+ (uint16_t)vhost_idx_diff(vq, new, old);
+}
+
+
static inline int
virtqueue_kick_prepare(struct virtqueue *vq)
{
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
+{
+ uint16_t notify_offset, flags, wrap;
+ uint16_t old, new;
+ int v;
+
+ if (vtpci_packed_queue(vq->hw)) {
+ flags = vq->vq_ring.device_event->desc_event_flags;
+ if (!(flags & RING_EVENT_FLAGS_DESC))
+ return flags & RING_EVENT_FLAGS_ENABLE;
+ virtio_rmb();
+ notify_offset = vq->vq_ring.device_event->desc_event_off_wrap;
+ wrap = notify_offset & 0x1;
+ notify_offset >>= 1;
+
+ old = vq->vq_signalled_avail;
+ v = vq->vq_signalled_avail_valid;
+ new = vq->vq_signalled_avail = vq->vq_avail_idx;
+ vq->vq_signalled_avail_valid = 1;
+
+ if (unlikely(!v))
+ return 0;
+
+ return (vring_packed_need_event(vq, new, old, notify_offset) &&
+ wrap == vq->vq_ring.avail_wrap_counter);
+ } else {
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+ }
+}
+
static inline void
virtqueue_notify(struct virtqueue *vq)
{