[PATCH v4 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs

Li Zhang lizh at nvidia.com
Sat Jun 18 11:02:44 CEST 2022


The driver wrongly interprets the max_num_virtio_queues capability
as the number of virtq pairs, while it actually reports the total
number of virtqs.

Adjust all usages of this capability to treat the value as the
number of virtqs.
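
For illustration, a minimal sketch of the corrected semantics (the
helper below is hypothetical and not part of the patch):

	/* caps.max_num_virtio_queues counts individual virtqs, not
	 * queue pairs: a device exposing N virtqs supports N / 2 pairs.
	 */
	static uint32_t
	mlx5_vdpa_queue_pairs(uint32_t max_num_virtio_queues)
	{
		return max_num_virtio_queues / 2;
	}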

Fixes: c2eb33a ("vdpa/mlx5: manage virtqs by array")
Cc: stable at dpdk.org

Signed-off-by: Li Zhang <lizh at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 12 ++++++------
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

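Note that a vring id indexes an individual virtq, which is why the
bounds checks in the diff below compare it against
max_num_virtio_queues directly. Hypothetical helpers illustrating the
usual virtio index convention assumed here (not part of the patch):

	/* Queue pair i owns virtq 2 * i (RX) and virtq 2 * i + 1 (TX). */
	static inline uint32_t rxq_of_pair(uint32_t pair) { return pair * 2; }
	static inline uint32_t txq_of_pair(uint32_t pair) { return pair * 2 + 1; }
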
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 76fa5d4299..ee71339b78 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -84,7 +84,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -1;
 	}
-	*queue_num = priv->caps.max_num_virtio_queues;
+	*queue_num = priv->caps.max_num_virtio_queues / 2;
 	return 0;
 }
 
@@ -141,7 +141,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (vring >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
@@ -388,7 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -411,7 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -624,7 +624,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
-			   attr->vdpa.max_num_virtio_queues * 2,
+			   attr->vdpa.max_num_virtio_queues,
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
@@ -685,7 +685,7 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		if (!priv->virtqs[i].counters)
 			continue;
 		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e025be47d2..c258eb3024 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,7 +72,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i, j;
 
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
 
 		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
@@ -492,9 +492,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
 	}
-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+	if (nr_vring > priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
-			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)priv->caps.max_num_virtio_queues,
 			(int)nr_vring);
 		return -1;
 	}
-- 
2.31.1