[v9,1/2] examples/vhost: add ioat ring space count and check
Checks
Commit Message
Add an ioat ring space count and check: if the ioat ring space is not
enough for the next async vhost packet enqueue, just return, to prevent
an enqueue failure. Also add a failure handler for rte_ioat_completed_ops().
Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
Reviewed-by: Jiayu Hu <jiayu.hu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
examples/vhost/ioat.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
--
2.29.2
Comments
On 1/12/21 5:38 AM, Cheng Jiang wrote:
> Add ioat ring space count and check, if ioat ring space is not enough
> for the next async vhost packet enqueue, then just return to prevent
> enqueue failure. Add rte_ioat_completed_ops() fail handler.
>
> Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
> Reviewed-by: Jiayu Hu <jiayu.hu@intel.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> examples/vhost/ioat.c | 24 +++++++++++++-----------
> 1 file changed, 13 insertions(+), 11 deletions(-)
>
> diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
> index 71d8a1f1f..dbad28d43 100644
> --- a/examples/vhost/ioat.c
> +++ b/examples/vhost/ioat.c
> @@ -17,6 +17,7 @@ struct packet_tracker {
> unsigned short next_read;
> unsigned short next_write;
> unsigned short last_remain;
> + unsigned short ioat_space;
> };
>
> struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
> @@ -113,7 +114,7 @@ open_ioat(const char *value)
> goto out;
> }
> rte_rawdev_start(dev_id);
> -
> + cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE;
> dma_info->nr++;
> i++;
> }
> @@ -140,13 +141,9 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
> src = descs[i_desc].src;
> dst = descs[i_desc].dst;
> i_seg = 0;
> + if (cb_tracker[dev_id].ioat_space < src->nr_segs)
> + break;
> while (i_seg < src->nr_segs) {
> - /*
> - * TODO: Assuming that the ring space of the
> - * IOAT device is large enough, so there is no
> - * error here, and the actual error handling
> - * will be added later.
> - */
> rte_ioat_enqueue_copy(dev_id,
> (uintptr_t)(src->iov[i_seg].iov_base)
> + src->offset,
> @@ -158,7 +155,8 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
> i_seg++;
> }
> write &= mask;
> - cb_tracker[dev_id].size_track[write] = i_seg;
> + cb_tracker[dev_id].size_track[write] = src->nr_segs;
> + cb_tracker[dev_id].ioat_space -= src->nr_segs;
> write++;
> }
> } else {
> @@ -178,17 +176,21 @@ ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
> {
> if (!opaque_data) {
> uintptr_t dump[255];
> - unsigned short n_seg;
> + int n_seg;
> unsigned short read, write;
> unsigned short nb_packet = 0;
> unsigned short mask = MAX_ENQUEUED_SIZE - 1;
> unsigned short i;
> +
> int dev_id = dma_bind[vid].dmas[queue_id * 2
> + VIRTIO_RXQ].dev_id;
> n_seg = rte_ioat_completed_ops(dev_id, 255, dump, dump);
> - n_seg += cb_tracker[dev_id].last_remain;
> - if (!n_seg)
> + if (n_seg <= 0)
> return 0;
In a separate patch, it might make sense to propagate the error if
rte_ioat_completed_ops returns -1.
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Maxime
> +
> + cb_tracker[dev_id].ioat_space += n_seg;
> + n_seg += cb_tracker[dev_id].last_remain;
> +
> read = cb_tracker[dev_id].next_read;
> write = cb_tracker[dev_id].next_write;
> for (i = 0; i < max_packets; i++) {
> --
> 2.29.2
>
Hi,
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Thursday, January 21, 2021 8:35 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Wang, Yinan <yinan.wang@intel.com>
> Subject: Re: [PATCH v9 1/2] examples/vhost: add ioat ring space count and
> check
>
>
>
> On 1/12/21 5:38 AM, Cheng Jiang wrote:
> > Add ioat ring space count and check, if ioat ring space is not enough
> > for the next async vhost packet enqueue, then just return to prevent
> > enqueue failure. Add rte_ioat_completed_ops() fail handler.
> >
> > Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
> > Reviewed-by: Jiayu Hu <jiayu.hu@intel.com>
> > Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> > ---
> > examples/vhost/ioat.c | 24 +++++++++++++-----------
> > 1 file changed, 13 insertions(+), 11 deletions(-)
> >
> > diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c index
> > 71d8a1f1f..dbad28d43 100644
> > --- a/examples/vhost/ioat.c
> > +++ b/examples/vhost/ioat.c
> > @@ -17,6 +17,7 @@ struct packet_tracker {
> > unsigned short next_read;
> > unsigned short next_write;
> > unsigned short last_remain;
> > + unsigned short ioat_space;
> > };
> >
> > struct packet_tracker cb_tracker[MAX_VHOST_DEVICE]; @@ -113,7 +114,7
> > @@ open_ioat(const char *value)
> > goto out;
> > }
> > rte_rawdev_start(dev_id);
> > -
> > + cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE;
> > dma_info->nr++;
> > i++;
> > }
> > @@ -140,13 +141,9 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
> > src = descs[i_desc].src;
> > dst = descs[i_desc].dst;
> > i_seg = 0;
> > + if (cb_tracker[dev_id].ioat_space < src->nr_segs)
> > + break;
> > while (i_seg < src->nr_segs) {
> > - /*
> > - * TODO: Assuming that the ring space of the
> > - * IOAT device is large enough, so there is no
> > - * error here, and the actual error handling
> > - * will be added later.
> > - */
> > rte_ioat_enqueue_copy(dev_id,
> > (uintptr_t)(src->iov[i_seg].iov_base)
> > + src->offset,
> > @@ -158,7 +155,8 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
> > i_seg++;
> > }
> > write &= mask;
> > - cb_tracker[dev_id].size_track[write] = i_seg;
> > + cb_tracker[dev_id].size_track[write] = src->nr_segs;
> > + cb_tracker[dev_id].ioat_space -= src->nr_segs;
> > write++;
> > }
> > } else {
> > @@ -178,17 +176,21 @@ ioat_check_completed_copies_cb(int vid,
> uint16_t
> > queue_id, {
> > if (!opaque_data) {
> > uintptr_t dump[255];
> > - unsigned short n_seg;
> > + int n_seg;
> > unsigned short read, write;
> > unsigned short nb_packet = 0;
> > unsigned short mask = MAX_ENQUEUED_SIZE - 1;
> > unsigned short i;
> > +
> > int dev_id = dma_bind[vid].dmas[queue_id * 2
> > + VIRTIO_RXQ].dev_id;
> > n_seg = rte_ioat_completed_ops(dev_id, 255, dump, dump);
> > - n_seg += cb_tracker[dev_id].last_remain;
> > - if (!n_seg)
> > + if (n_seg <= 0)
> > return 0;
>
> In a separate patch, it might make sense to propagate the error if
> rte_ioat_completed_ops return -1.
Sure, I'll send a patch to fix it when this patch set is merged.
Thanks,
Cheng
>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>
> Maxime
> > +
> > + cb_tracker[dev_id].ioat_space += n_seg;
> > + n_seg += cb_tracker[dev_id].last_remain;
> > +
> > read = cb_tracker[dev_id].next_read;
> > write = cb_tracker[dev_id].next_write;
> > for (i = 0; i < max_packets; i++) {
> > --
> > 2.29.2
> >
@@ -17,6 +17,7 @@ struct packet_tracker {
unsigned short next_read;
unsigned short next_write;
unsigned short last_remain;
+ unsigned short ioat_space;
};
struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
@@ -113,7 +114,7 @@ open_ioat(const char *value)
goto out;
}
rte_rawdev_start(dev_id);
-
+ cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE;
dma_info->nr++;
i++;
}
@@ -140,13 +141,9 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
src = descs[i_desc].src;
dst = descs[i_desc].dst;
i_seg = 0;
+ if (cb_tracker[dev_id].ioat_space < src->nr_segs)
+ break;
while (i_seg < src->nr_segs) {
- /*
- * TODO: Assuming that the ring space of the
- * IOAT device is large enough, so there is no
- * error here, and the actual error handling
- * will be added later.
- */
rte_ioat_enqueue_copy(dev_id,
(uintptr_t)(src->iov[i_seg].iov_base)
+ src->offset,
@@ -158,7 +155,8 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
i_seg++;
}
write &= mask;
- cb_tracker[dev_id].size_track[write] = i_seg;
+ cb_tracker[dev_id].size_track[write] = src->nr_segs;
+ cb_tracker[dev_id].ioat_space -= src->nr_segs;
write++;
}
} else {
@@ -178,17 +176,21 @@ ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
{
if (!opaque_data) {
uintptr_t dump[255];
- unsigned short n_seg;
+ int n_seg;
unsigned short read, write;
unsigned short nb_packet = 0;
unsigned short mask = MAX_ENQUEUED_SIZE - 1;
unsigned short i;
+
int dev_id = dma_bind[vid].dmas[queue_id * 2
+ VIRTIO_RXQ].dev_id;
n_seg = rte_ioat_completed_ops(dev_id, 255, dump, dump);
- n_seg += cb_tracker[dev_id].last_remain;
- if (!n_seg)
+ if (n_seg <= 0)
return 0;
+
+ cb_tracker[dev_id].ioat_space += n_seg;
+ n_seg += cb_tracker[dev_id].last_remain;
+
read = cb_tracker[dev_id].next_read;
write = cb_tracker[dev_id].next_write;
for (i = 0; i < max_packets; i++) {