net/virtio: allocate vrings on device NUMA node

Message ID 20181127105427.11641-1-maxime.coquelin@redhat.com (mailing list archive)
State Accepted, archived
Delegated to: Maxime Coquelin
Headers
Series net/virtio: allocate vrings on device NUMA node |

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/mellanox-Performance-Testing success Performance Testing PASS
ci/intel-Performance-Testing success Performance Testing PASS

Commit Message

Maxime Coquelin Nov. 27, 2018, 10:54 a.m. UTC
  When a guest spans multiple NUMA nodes and
multiple Virtio devices are assigned to these nodes,
we expect their ring memory to be allocated on the
correct memory node.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
  

Comments

David Marchand Nov. 27, 2018, 1:29 p.m. UTC | #1
On Tue, Nov 27, 2018 at 11:54 AM Maxime Coquelin <maxime.coquelin@redhat.com>
wrote:

> When a guest is spanned on multiple NUMA nodes and
> multiple Virtio devices are spanned onto these nodes,
> we expect that their ring memory is allocated in the
> right memory node.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
>  1 file changed, 7 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 2ba66d291..d99571d93 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -335,8 +335,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> vtpci_queue_idx)
>         void *sw_ring = NULL;
>         int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
>         int ret;
> +       int numa_node = dev->device->numa_node;
>
> -       PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
> +       PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %u",
>

%d ?

+                       vtpci_queue_idx, numa_node);
>
>         /*
>          * Read the virtqueue size from the Queue Size field
> @@ -372,7 +374,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> vtpci_queue_idx)
>         }
>
>         vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
> -                               SOCKET_ID_ANY);
> +                               numa_node);
>         if (vq == NULL) {
>                 PMD_INIT_LOG(ERR, "can not allocate vq");
>                 return -ENOMEM;
> @@ -392,7 +394,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> vtpci_queue_idx)
>                      size, vq->vq_ring_size);
>
>         mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
> -                       SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
> +                       numa_node, RTE_MEMZONE_IOVA_CONTIG,
>                         VIRTIO_PCI_VRING_ALIGN);
>         if (mz == NULL) {
>                 if (rte_errno == EEXIST)
> @@ -418,7 +420,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> vtpci_queue_idx)
>                 snprintf(vq_hdr_name, sizeof(vq_hdr_name),
> "port%d_vq%d_hdr",
>                          dev->data->port_id, vtpci_queue_idx);
>                 hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name,
> sz_hdr_mz,
> -                               SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
> +                               numa_node, RTE_MEMZONE_IOVA_CONTIG,
>                                 RTE_CACHE_LINE_SIZE);
>                 if (hdr_mz == NULL) {
>                         if (rte_errno == EEXIST)
> @@ -435,7 +437,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> vtpci_queue_idx)
>                                sizeof(vq->sw_ring[0]);
>
>                 sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
> -                               RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
> +                               RTE_CACHE_LINE_SIZE, numa_node);
>                 if (!sw_ring) {
>                         PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
>                         ret = -ENOMEM;
>


Reviewed-by: David Marchand <david.marchand@redhat.com>
  
Maxime Coquelin Nov. 27, 2018, 2:27 p.m. UTC | #2
On 11/27/18 2:29 PM, David Marchand wrote:
> On Tue, Nov 27, 2018 at 11:54 AM Maxime Coquelin 
> <maxime.coquelin@redhat.com <mailto:maxime.coquelin@redhat.com>> wrote:
> 
>     When a guest is spanned on multiple NUMA nodes and
>     multiple Virtio devices are spanned onto these nodes,
>     we expect that their ring memory is allocated in the
>     right memory node.
> 
>     Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com
>     <mailto:maxime.coquelin@redhat.com>>
>     ---
>       drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
>       1 file changed, 7 insertions(+), 5 deletions(-)
> 
>     diff --git a/drivers/net/virtio/virtio_ethdev.c
>     b/drivers/net/virtio/virtio_ethdev.c
>     index 2ba66d291..d99571d93 100644
>     --- a/drivers/net/virtio/virtio_ethdev.c
>     +++ b/drivers/net/virtio/virtio_ethdev.c
>     @@ -335,8 +335,10 @@ virtio_init_queue(struct rte_eth_dev *dev,
>     uint16_t vtpci_queue_idx)
>              void *sw_ring = NULL;
>              int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
>              int ret;
>     +       int numa_node = dev->device->numa_node;
> 
>     -       PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
>     +       PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %u",
> 
> 
> %d ?

Right, thanks for spotting it.

Maxime
  
Maxime Coquelin Dec. 11, 2018, 6:31 p.m. UTC | #3
On 11/27/18 11:54 AM, Maxime Coquelin wrote:
> When a guest is spanned on multiple NUMA nodes and
> multiple Virtio devices are spanned onto these nodes,
> we expect that their ring memory is allocated in the
> right memory node.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>   drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
>   1 file changed, 7 insertions(+), 5 deletions(-)
> 


Applied with fix suggested by David.

Thanks,
Maxime
  

Patch

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d291..d99571d93 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -335,8 +335,10 @@  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	void *sw_ring = NULL;
 	int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
 	int ret;
+	int numa_node = dev->device->numa_node;
 
-	PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
+	PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %u",
+			vtpci_queue_idx, numa_node);
 
 	/*
 	 * Read the virtqueue size from the Queue Size field
@@ -372,7 +374,7 @@  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	}
 
 	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
-				SOCKET_ID_ANY);
+				numa_node);
 	if (vq == NULL) {
 		PMD_INIT_LOG(ERR, "can not allocate vq");
 		return -ENOMEM;
@@ -392,7 +394,7 @@  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		     size, vq->vq_ring_size);
 
 	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
-			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+			numa_node, RTE_MEMZONE_IOVA_CONTIG,
 			VIRTIO_PCI_VRING_ALIGN);
 	if (mz == NULL) {
 		if (rte_errno == EEXIST)
@@ -418,7 +420,7 @@  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
 			 dev->data->port_id, vtpci_queue_idx);
 		hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
-				SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+				numa_node, RTE_MEMZONE_IOVA_CONTIG,
 				RTE_CACHE_LINE_SIZE);
 		if (hdr_mz == NULL) {
 			if (rte_errno == EEXIST)
@@ -435,7 +437,7 @@  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 			       sizeof(vq->sw_ring[0]);
 
 		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
-				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+				RTE_CACHE_LINE_SIZE, numa_node);
 		if (!sw_ring) {
 			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
 			ret = -ENOMEM;