[v2,1/2] vhost: cleanup async enqueue
Checks
Commit Message
This patch removes an unnecessary check and redundant function calls,
changes internal variables to appropriate types, and fixes typos.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
lib/librte_vhost/rte_vhost_async.h | 6 +++---
lib/librte_vhost/virtio_net.c | 16 ++++++++--------
2 files changed, 11 insertions(+), 11 deletions(-)
@@ -147,8 +147,8 @@ __rte_experimental
int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id);
/**
- * This function submit enqueue data to async engine. This function has
- * no guranttee to the transfer completion upon return. Applications
+ * This function submits enqueue data to async engine. This function has
+ * no guarantee to the transfer completion upon return. Applications
* should poll transfer status by rte_vhost_poll_enqueue_completed()
*
* @param vid
@@ -167,7 +167,7 @@ uint16_t rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
/**
- * This function check async completion status for a specific vhost
+ * This function checks async completion status for a specific vhost
* device queue. Packets which finish copying (enqueue) operation
* will be returned in an array.
*
@@ -1128,8 +1128,11 @@ async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
out:
- async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
- async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+ if (tlen) {
+ async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
+ async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+ } else
+ src_it->count = 0;
return error;
}
@@ -1492,10 +1495,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct rte_vhost_iov_iter *src_it = it_pool;
struct rte_vhost_iov_iter *dst_it = it_pool + 1;
uint16_t n_free_slot, slot_idx = 0;
- uint16_t pkt_err = 0;
uint16_t segs_await = 0;
struct async_inflight_info *pkts_info = vq->async_pkts_info;
- int n_pkts = 0;
+ uint32_t n_pkts = 0, pkt_err = 0;
avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
@@ -1553,11 +1555,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
/*
* conditions to trigger async device transfer:
* - buffered packet number reaches transfer threshold
- * - this is the last packet in the burst enqueue
* - unused async iov number is less than max vhost vector
*/
if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
- (pkt_idx == count - 1 && pkt_burst_idx) ||
(VHOST_MAX_ASYNC_VEC / 2 - segs_await <
BUF_VECTOR_MAX)) {
n_pkts = vq->async_ops.transfer_data(dev->vid,
@@ -1569,7 +1569,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
segs_await = 0;
vq->async_pkts_inflight_n += pkt_burst_idx;
- if (unlikely(n_pkts < (int)pkt_burst_idx)) {
+ if (unlikely(n_pkts < pkt_burst_idx)) {
/*
* log error packets number here and do actual
* error processing when applications poll
@@ -1589,7 +1589,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
queue_id, tdes, 0, pkt_burst_idx);
vq->async_pkts_inflight_n += pkt_burst_idx;
- if (unlikely(n_pkts < (int)pkt_burst_idx))
+ if (unlikely(n_pkts < pkt_burst_idx))
pkt_err = pkt_burst_idx - n_pkts;
}