patch 'examples/vhost: fix launch with physical port' has been queued to stable release 20.11.5

luca.boccassi at gmail.com
Wed Mar 9 17:30:35 CET 2022


Hi,

FYI, your patch has been queued to stable release 20.11.5

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 03/11/22. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://github.com/bluca/dpdk-stable

This queued commit can be viewed at:
https://github.com/bluca/dpdk-stable/commit/80b49318e6d4100cee8f9be4f6d0385f079f5ad2

Thanks.

Luca Boccassi

---
From 80b49318e6d4100cee8f9be4f6d0385f079f5ad2 Mon Sep 17 00:00:00 2001
From: Wenwu Ma <wenwux.ma at intel.com>
Date: Fri, 4 Mar 2022 16:24:24 +0000
Subject: [PATCH] examples/vhost: fix launch with physical port

[ upstream commit 917229c24e871bbc3225a0227eb3f0faaa7aaa69 ]

dpdk-vhost will fail to launch with a 40G i40e port because
there are not enough mbufs. This patch adds a new option
--total-num-mbufs, through which the user can set a larger
mbuf pool to avoid this problem.

Fixes: 4796ad63ba1f ("examples/vhost: import userspace vhost application")

Signed-off-by: Wenwu Ma <wenwux.ma at intel.com>
Reviewed-by: Chenbo Xia <chenbo.xia at intel.com>
---
 examples/vhost/main.c | 83 +++++++++++++++----------------------------
 1 file changed, 28 insertions(+), 55 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index e05a270a2d..fc07b31b2e 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -33,6 +33,8 @@
 #define MAX_QUEUES 128
 #endif
 
+#define NUM_MBUFS_DEFAULT 0x24000
+
 /* the maximum number of external ports supported */
 #define MAX_SUP_PORTS 1
 
@@ -60,6 +62,9 @@
 /* Maximum long option length for option parsing. */
 #define MAX_LONG_OPT_SZ 64
 
+/* number of mbufs in all pools - if specified on command-line. */
+static int total_num_mbufs = NUM_MBUFS_DEFAULT;
+
 /* mask of enabled ports */
 static uint32_t enabled_port_mask = 0;
 
@@ -463,7 +468,8 @@ us_vhost_usage(const char *prgname)
 	"		--tso [0|1] disable/enable TCP segment offload.\n"
 	"		--client register a vhost-user socket as client mode.\n"
 	"		--dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n"
-	"		--dmas register dma channel for specific vhost device.\n",
+	"		--dmas register dma channel for specific vhost device.\n"
+	"		--total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
 	       prgname);
 }
 
@@ -491,7 +497,7 @@ us_vhost_parse_args(int argc, char **argv)
 		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
 		{"dma-type", required_argument, NULL, 0},
 		{"dmas", required_argument, NULL, 0},
-		{NULL, 0, 0, 0},
+		{"total-num-mbufs", required_argument, NULL, 0},
 	};
 
 	/* Parse command line */
@@ -655,6 +661,21 @@ us_vhost_parse_args(int argc, char **argv)
 				async_vhost_driver = 1;
 			}
 
+
+			if (!strncmp(long_option[option_index].name,
+						"total-num-mbufs", MAX_LONG_OPT_SZ)) {
+				ret = parse_num_opt(optarg, INT32_MAX);
+				if (ret == -1) {
+					RTE_LOG(INFO, VHOST_CONFIG,
+						"Invalid argument for total-num-mbufs [0..N]\n");
+					us_vhost_usage(prgname);
+					return -1;
+				}
+
+				if (total_num_mbufs < ret)
+					total_num_mbufs = ret;
+			}
+
 			break;
 
 			/* Invalid option - print options. */
@@ -1443,57 +1464,6 @@ sigint_handler(__rte_unused int signum)
 	exit(0);
 }
 
-/*
- * While creating an mbuf pool, one key thing is to figure out how
- * many mbuf entries is enough for our use. FYI, here are some
- * guidelines:
- *
- * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
- *
- * - For each switch core (A CPU core does the packet switch), we need
- *   also make some reservation for receiving the packets from virtio
- *   Tx queue. How many is enough depends on the usage. It's normally
- *   a simple calculation like following:
- *
- *       MAX_PKT_BURST * max packet size / mbuf size
- *
- *   So, we definitely need allocate more mbufs when TSO is enabled.
- *
- * - Similarly, for each switching core, we should serve @nr_rx_desc
- *   mbufs for receiving the packets from physical NIC device.
- *
- * - We also need make sure, for each switch core, we have allocated
- *   enough mbufs to fill up the mbuf cache.
- */
-static void
-create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
-	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
-{
-	uint32_t nr_mbufs;
-	uint32_t nr_mbufs_per_core;
-	uint32_t mtu = 1500;
-
-	if (mergeable)
-		mtu = 9000;
-	if (enable_tso)
-		mtu = 64 * 1024;
-
-	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
-			(mbuf_size - RTE_PKTMBUF_HEADROOM);
-	nr_mbufs_per_core += nr_rx_desc;
-	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
-
-	nr_mbufs  = nr_queues * nr_rx_desc;
-	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
-	nr_mbufs *= nr_port;
-
-	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
-					    nr_mbuf_cache, 0, mbuf_size,
-					    rte_socket_id());
-	if (mbuf_pool == NULL)
-		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-}
-
 /*
  * Main function, does initialisation and calls the per-lcore functions.
  */
@@ -1552,8 +1522,11 @@ main(int argc, char *argv[])
 	 * many queues here. We probably should only do allocation for
 	 * those queues we are going to use.
 	 */
-	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
-			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
+	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
+					    MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+					    rte_socket_id());
+	if (mbuf_pool == NULL)
+		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 
 	if (vm2vm_mode == VM2VM_HARDWARE) {
 		/* Enable VT loop back to let L2 switch to do it. */
-- 
2.30.2
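
For reference, below is a minimal standalone sketch of the pattern the patch
switches to: one mbuf pool whose size comes from a "--total-num-mbufs" long
option (default 0x24000, i.e. 147456 mbufs) instead of the removed per-core
calculation. The option name, pool name and default mirror the patch; the
real example parses the option via parse_num_opt() inside us_vhost_parse_args()
as shown in the hunk above, while the EAL/getopt scaffolding, the
MBUF_CACHE_SIZE value and the error handling here are illustrative
assumptions, not the example's actual code.

/*
 * Hedged sketch of the patch's approach: a single mbuf pool sized from
 * the command line. Scaffolding values are assumptions, see note above.
 */
#include <getopt.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

#define NUM_MBUFS_DEFAULT 0x24000	/* 147456 mbufs, as in the patch */
#define MBUF_CACHE_SIZE   128		/* assumed per-lcore cache size */

static struct rte_mempool *mbuf_pool;

int
main(int argc, char **argv)
{
	unsigned int total_num_mbufs = NUM_MBUFS_DEFAULT;
	static const struct option long_opts[] = {
		{"total-num-mbufs", required_argument, NULL, 0},
		{NULL, 0, NULL, 0},
	};
	int opt, opt_idx, ret;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "EAL init failed\n");
	argc -= ret;
	argv += ret;

	while ((opt = getopt_long(argc, argv, "", long_opts, &opt_idx)) != -1) {
		if (opt == 0 && opt_idx == 0) {
			unsigned long val = strtoul(optarg, NULL, 0);

			/* As in the patch: only ever grow the pool. */
			if (val > total_num_mbufs)
				total_num_mbufs = val;
		}
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
					    MBUF_CACHE_SIZE, 0,
					    RTE_MBUF_DEFAULT_BUF_SIZE,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	return 0;
}

A pool that is too small for a 40G i40e port can then simply be grown by
passing a larger --total-num-mbufs value on the command line; as in the
patch, values below the default are ignored rather than shrinking the pool.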

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2022-03-09 16:30:09.245866424 +0000
+++ 0019-examples-vhost-fix-launch-with-physical-port.patch	2022-03-09 16:30:08.547025154 +0000
@@ -1 +1 @@
-From 917229c24e871bbc3225a0227eb3f0faaa7aaa69 Mon Sep 17 00:00:00 2001
+From 80b49318e6d4100cee8f9be4f6d0385f079f5ad2 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 917229c24e871bbc3225a0227eb3f0faaa7aaa69 ]
+
@@ -12 +13,0 @@
-Cc: stable at dpdk.org
@@ -18 +19 @@
- 1 file changed, 29 insertions(+), 54 deletions(-)
+ 1 file changed, 28 insertions(+), 55 deletions(-)
@@ -21 +22 @@
-index 68afd398bb..d94fabb060 100644
+index e05a270a2d..fc07b31b2e 100644
@@ -33,3 +34,3 @@
-@@ -61,6 +63,9 @@
- 
- #define DMA_RING_SIZE 4096
+@@ -60,6 +62,9 @@
+ /* Maximum long option length for option parsing. */
+ #define MAX_LONG_OPT_SZ 64
@@ -40,5 +41,4 @@
- struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
- int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
- static int dma_count;
-@@ -608,7 +613,8 @@ us_vhost_usage(const char *prgname)
- 	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
+ /* mask of enabled ports */
+ static uint32_t enabled_port_mask = 0;
+ 
+@@ -463,7 +468,8 @@ us_vhost_usage(const char *prgname)
@@ -46,0 +47 @@
+ 	"		--dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n"
@@ -53,16 +54,6 @@
-@@ -637,6 +643,8 @@ enum {
- 	OPT_BUILTIN_NET_DRIVER_NUM,
- #define OPT_DMAS                "dmas"
- 	OPT_DMAS_NUM,
-+#define OPT_NUM_MBUFS           "total-num-mbufs"
-+	OPT_NUM_MBUFS_NUM,
- };
- 
- /*
-@@ -674,6 +682,8 @@ us_vhost_parse_args(int argc, char **argv)
- 				NULL, OPT_BUILTIN_NET_DRIVER_NUM},
- 		{OPT_DMAS, required_argument,
- 				NULL, OPT_DMAS_NUM},
-+		{OPT_NUM_MBUFS, required_argument,
-+				NULL, OPT_NUM_MBUFS_NUM},
- 		{NULL, 0, 0, 0},
+@@ -491,7 +497,7 @@ us_vhost_parse_args(int argc, char **argv)
+ 		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
+ 		{"dma-type", required_argument, NULL, 0},
+ 		{"dmas", required_argument, NULL, 0},
+-		{NULL, 0, 0, 0},
++		{"total-num-mbufs", required_argument, NULL, 0},
@@ -71 +62,3 @@
-@@ -801,6 +811,19 @@ us_vhost_parse_args(int argc, char **argv)
+ 	/* Parse command line */
+@@ -655,6 +661,21 @@ us_vhost_parse_args(int argc, char **argv)
+ 				async_vhost_driver = 1;
@@ -73 +65,0 @@
- 			break;
@@ -75,8 +66,0 @@
-+		case OPT_NUM_MBUFS_NUM:
-+			ret = parse_num_opt(optarg, INT32_MAX);
-+			if (ret == -1) {
-+				RTE_LOG(INFO, VHOST_CONFIG,
-+					"Invalid argument for total-num-mbufs [0..N]\n");
-+				us_vhost_usage(prgname);
-+				return -1;
-+			}
@@ -84,3 +68,13 @@
-+			if (total_num_mbufs < ret)
-+				total_num_mbufs = ret;
-+			break;
++			if (!strncmp(long_option[option_index].name,
++						"total-num-mbufs", MAX_LONG_OPT_SZ)) {
++				ret = parse_num_opt(optarg, INT32_MAX);
++				if (ret == -1) {
++					RTE_LOG(INFO, VHOST_CONFIG,
++						"Invalid argument for total-num-mbufs [0..N]\n");
++					us_vhost_usage(prgname);
++					return -1;
++				}
++
++				if (total_num_mbufs < ret)
++					total_num_mbufs = ret;
++			}
@@ -88,2 +81,0 @@
- 		case OPT_CLIENT_NUM:
- 			client_mode = 1;
@@ -91 +83,3 @@
-@@ -1730,57 +1753,6 @@ sigint_handler(__rte_unused int signum)
+ 
+ 			/* Invalid option - print options. */
+@@ -1443,57 +1464,6 @@ sigint_handler(__rte_unused int signum)
@@ -146,4 +140,4 @@
- static void
- reset_dma(void)
- {
-@@ -1860,8 +1832,11 @@ main(int argc, char *argv[])
+ /*
+  * Main function, does initialisation and calls the per-lcore functions.
+  */
+@@ -1552,8 +1522,11 @@ main(int argc, char *argv[])