[PATCH v3] vhost: fix build issues with GCC 12

Maxime Coquelin <maxime.coquelin@redhat.com>
Wed Oct 5 22:35:24 CEST 2022


This patch fixes a compilation issue seen with GCC 12 on
LoongArch64:

In function ‘mbuf_to_desc’,
    inlined from ‘vhost_enqueue_async_packed’ at ../../../dpdk/lib/vhost/virtio_net.c:1822:6,
    inlined from ‘virtio_dev_rx_async_packed’ at ../../../dpdk/lib/vhost/virtio_net.c:1836:6,
    inlined from ‘virtio_dev_rx_async_submit_packed’ at ../../../dpdk/lib/vhost/virtio_net.c:1895:7:
../../../dpdk/lib/vhost/virtio_net.c:1159:18: error: ‘buf_vec[0].buf_addr’ may be used uninitialized [-Werror=maybe-uninitialized]
 1159 |         buf_addr = buf_vec[vec_idx].buf_addr;
      |         ~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~
../../../dpdk/lib/vhost/virtio_net.c: In function ‘virtio_dev_rx_async_submit_packed’:
../../../dpdk/lib/vhost/virtio_net.c:1834:27: note: ‘buf_vec’ declared here
 1834 |         struct buf_vector buf_vec[BUF_VECTOR_MAX];
      |                           ^~~~~~~

This happens because the compiler assumes that the 'size'
variable in vhost_enqueue_async_packed() could wrap around to 0,
since both 'size' and 'pkt->pkt_len' are uint32_t.

In practice, it would never happen since 'pkt->pkt_len' is
unlikely to be close to UINT32_MAX, but let's just change
'size' to uint64_t to make the compiler happy without
having to add runtime checks.
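
For illustration, here is a minimal standalone sketch of the
pattern the compiler complains about. It is not the DPDK code:
fill_vec(), the 4096-byte chunk size and the 12-byte header
constant are made up for this example. The point is that if
'size' could wrap to 0, the loop filling 'buf_vec' would never
run and 'buf_vec[0]' would be read uninitialized; widening
'size' to uint64_t removes that possibility.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct buf_vector {
	uint64_t buf_addr;
	uint32_t buf_len;
};

static int
fill_vec(struct buf_vector *buf_vec, uint64_t size)
{
	int vec_idx = 0;

	/* Runs at least once as long as size != 0. */
	while (size > 0) {
		uint32_t len = size > 4096 ? 4096 : (uint32_t)size;

		buf_vec[vec_idx].buf_addr = 0x1000 + (uint64_t)vec_idx * 4096;
		buf_vec[vec_idx].buf_len = len;
		size -= len;
		vec_idx++;
	}

	return vec_idx;
}

int
main(void)
{
	struct buf_vector buf_vec[8];
	uint32_t pkt_len = 1500;

	/*
	 * With a uint32_t 'size', GCC 12 assumes 'pkt_len + 12' may wrap
	 * to 0 and flags the buf_vec[0] read below as maybe-uninitialized.
	 * With uint64_t the sum cannot wrap, so the warning goes away.
	 */
	uint64_t size = pkt_len + 12; /* 12 stands in for the virtio header */

	int nr_vec = fill_vec(buf_vec, size);

	printf("vectors: %d, first addr: 0x%" PRIx64 "\n",
	       nr_vec, buf_vec[0].buf_addr);
	return 0;
}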

This patch also fixes similar patterns in three other places,
including one in vhost_enqueue_single_packed() that also
produces a similar build issue on ARM64.

Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
- Fix similar issue in 3 other places (David)


 lib/vhost/virtio_net.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8f4d0f0502..6b4a062df3 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -787,7 +787,7 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
  */
 static inline int
 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
-				uint32_t size, struct buf_vector *buf_vec,
+				uint64_t size, struct buf_vector *buf_vec,
 				uint16_t *num_buffers, uint16_t avail_head,
 				uint16_t *nr_vec)
 {
@@ -1277,7 +1277,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
 	uint16_t buf_id = 0;
 	uint32_t len = 0;
 	uint16_t desc_count;
-	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	uint64_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	uint16_t num_buffers = 0;
 	uint32_t buffer_len[vq->size];
 	uint16_t buffer_buf_id[vq->size];
@@ -1345,7 +1345,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
 	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
-		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+		uint64_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 		uint16_t nr_vec = 0;
 
 		if (unlikely(reserve_avail_buf_split(dev, vq,
@@ -1689,7 +1689,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 	async_iter_reset(async);
 
 	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
-		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+		uint64_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 		uint16_t nr_vec = 0;
 
 		if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
@@ -1780,7 +1780,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
 	uint16_t buf_id = 0;
 	uint32_t len = 0;
 	uint16_t desc_count = 0;
-	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	uint64_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	uint32_t buffer_len[vq->size];
 	uint16_t buffer_buf_id[vq->size];
 	uint16_t buffer_desc_count[vq->size];
-- 
2.37.3


