[v7,05/13] vhost: add packed ring single dequeue
Checks
Commit Message
Add vhost single packet dequeue function for packed ring and meanwhile
left space for shadow used ring update function.
Signed-off-by: Marvin Liu <yong.liu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Comments
On 10/21/19 5:40 PM, Marvin Liu wrote:
> Add vhost single packet dequeue function for packed ring and meanwhile
> left space for shadow used ring update function.
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 4ddf26567..317be1aed 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -1635,6 +1635,61 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> return i;
> }
>
> +static __rte_always_inline int
> +vhost_dequeue_single_packed(struct virtio_net *dev,
> + struct vhost_virtqueue *vq,
> + struct rte_mempool *mbuf_pool,
> + struct rte_mbuf **pkts,
> + uint16_t *buf_id,
> + uint16_t *desc_count)
> +{
> + struct buf_vector buf_vec[BUF_VECTOR_MAX];
> + uint32_t dummy_len;
> + uint16_t nr_vec = 0;
> + int err;
> +
> + if (unlikely(fill_vec_buf_packed(dev, vq,
> + vq->last_avail_idx, desc_count,
> + buf_vec, &nr_vec,
> + buf_id, &dummy_len,
> + VHOST_ACCESS_RO) < 0))
> + return -1;
> +
> + *pkts = rte_pktmbuf_alloc(mbuf_pool);
This is not properly rebased: you should no longer call this API directly,
but instead use virtio_dev_pktmbuf_alloc(), which was introduced with
Flavio's patch.
> + if (unlikely(*pkts == NULL)) {
> + RTE_LOG(ERR, VHOST_DATA,
> + "Failed to allocate memory for mbuf.\n");
> + return -1;
> + }
> +
> + err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
> + mbuf_pool);
> + if (unlikely(err)) {
> + rte_pktmbuf_free(*pkts);
> + return -1;
> + }
> +
> + return 0;
> +}
> +
> +static __rte_unused int
> +virtio_dev_tx_single_packed(struct virtio_net *dev,
> + struct vhost_virtqueue *vq,
> + struct rte_mempool *mbuf_pool,
> + struct rte_mbuf **pkts)
> +{
> +
> + uint16_t buf_id, desc_count;
> +
> + if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
> + &desc_count))
> + return -1;
> +
> + vq_inc_last_avail_packed(vq, desc_count);
> +
> + return 0;
> +}
> +
> static __rte_noinline uint16_t
> virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
>
@@ -1635,6 +1635,61 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
return i;
}
+static __rte_always_inline int
+vhost_dequeue_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts,
+ uint16_t *buf_id,
+ uint16_t *desc_count)
+{
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint32_t dummy_len;
+ uint16_t nr_vec = 0;
+ int err;
+
+ if (unlikely(fill_vec_buf_packed(dev, vq,
+ vq->last_avail_idx, desc_count,
+ buf_vec, &nr_vec,
+ buf_id, &dummy_len,
+ VHOST_ACCESS_RO) < 0))
+ return -1;
+
+ *pkts = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(*pkts == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to allocate memory for mbuf.\n");
+ return -1;
+ }
+
+ err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+ mbuf_pool);
+ if (unlikely(err)) {
+ rte_pktmbuf_free(*pkts);
+ return -1;
+ }
+
+ return 0;
+}
+
+static __rte_unused int
+virtio_dev_tx_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts)
+{
+
+ uint16_t buf_id, desc_count;
+
+ if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+ &desc_count))
+ return -1;
+
+ vq_inc_last_avail_packed(vq, desc_count);
+
+ return 0;
+}
+
static __rte_noinline uint16_t
virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)