[dpdk-dev] [PATCH v7] net/i40e: determine number of queues per VF during run time

Peng, Yuan yuan.peng at intel.com
Mon Dec 11 03:33:39 CET 2017


Tested-by: Peng,Yuan<yuan.peng at intel.com>

- Tested Branch: dpdk master
- Tested Commit: 224374cc0e3ca44af5141fb7035a97f338d00c18
- OS: 4.5.5-300.fc24.x86_64
- GCC: gcc (GCC) 5.3.1 20151207 (Red Hat 5.3.1-2)
- CPU: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
- NIC: X710 for 10GbE SFP+ [8086:1572]
- Default x86_64-native-linuxapp-gcc configuration
- Prerequisites:
- Total 7 cases, 7 passed, 0 failed

- Prerequisites command / instruction:
 1.bind the pf port to dpdk driver::
    ./usertools/dpdk-devbind.py -b igb_uio 05:00.0
 2. set up two vfs from the pf with DPDK driver::
    echo 2 > /sys/bus/pci/devices/0000\:05\:00.0/max_vfs
   bind the two vfs to DPDK driver::
    ./usertools/dpdk-devbind.py -b vfio-pci 05:02.0 05:02.1

- Case: 
Test case 1: set valid VF max queue number
==========================================
1. try the valid values 1::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 \
    -w 05:00.0,queue-num-per-vf=1 --file-prefix=test1 \
    --socket-mem 1024,1024 -- -i
   testpmd can be started normally without any warning or error.
2. start VF testpmd with "--rxq=1 --txq=1", the number of rxq and txq is
   consistent with the configured VF max queue number::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf0 -n 4 -w 05:02.0 \
    --file-prefix=test2 --socket-mem 1024,1024 -- -i --rxq=1 --txq=1
   check the Max possible RX queues and TX queues is 1::
   start forwarding, you can see the actual queue number is 1::
3. repeat step1-2 with "queue-num-per-vf=2/4/8/16", and start VF testpmd
   with consistent rxq and txq number. check the max queue num and actual
   queue number is 2/4/8/16.

Test case 2: set invalid VF max queue number
============================================
1. try the invalid value 0::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 \
    -w 05:00.0,queue-num-per-vf=0 --file-prefix=test1 \
    --socket-mem 1024,1024 -- -i
   testpmd started with "i40e_pf_parse_vf_queue_number_handler(): Wrong
   VF queue number = 0, it must be power of 2 and equal or less than 16 !,
   Now it is kept the value = 4"
2. start VF testpmd with "--rxq=4 --txq=4", the number of rxq and txq is
   consistent with the default VF max queue number::
   check the Max possible RX queues and TX queues is 4::
   start forwarding, you can see the actual queue number is 4::
3. repeat step1-2 with "queue-num-per-vf=6/17/32", and start VF testpmd
   with default max rxq and txq number. check the max queue num and actual
   queue number is 4.

Test case 3: set VF queue number in testpmd command-line options
================================================================
1. set VF max queue number by PF::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 \
    -w 05:00.0,queue-num-per-vf=8 --file-prefix=test1 \
    --socket-mem 1024,1024 -- -i
2. start VF testpmd with "--rxq=3 --txq=3"::
   check the Max possible RX queues and TX queues is 8::
   start forwarding, you can see the actual queue number is 3::
3. quit the VF testpmd, then restart VF testpmd with "--rxq=9 --txq=9"::
   VF testpmd failed to start with the print::
    Fail: nb_rxq(9) is greater than max_rx_queues(8)

Test case 4: set VF queue number with testpmd function command
==============================================================
1. set VF max queue number by PF::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 \
    -w 05:00.0,queue-num-per-vf=8 --file-prefix=test1 \
    --socket-mem 1024,1024 -- -i
2. start VF testpmd without setting "rxq" and "txq"::
   check the Max possible RX queues and TX queues is 8::
   start forwarding, you can see the actual queue number is 1::
3. set rx queue number and tx queue number with testpmd function command::
    testpmd> port config all rxq 8
    testpmd> port config all txq 8
   start forwarding, you can see the actual queue number is 8::
4. reset rx queue number and tx queue number to 9::
    testpmd> port config all txq 9
    Fail: nb_txq(9) is greater than max_tx_queues(8)
    testpmd> port config all rxq 9
    Fail: nb_rxq(9) is greater than max_rx_queues(8)
  Failed to set it.

Test case 5: VF max queue number when VF bound to kernel driver
===============================================================
1. set VF max queue number to 2 by PF::
2. check the VF0 and VF1 rxq and txq number is 2::
    # ethtool -S enp5s2
3. repeat step1-2 with "queue-num-per-vf=1/4/8/16", check the rxq and txq
   number is 1/4/8/16.

Test case 6: set VF max queue number with 32 VFs on one PF port
===============================================================
1. set up 32 VFs from one PF with DPDK driver::
    echo 32 > /sys/bus/pci/devices/0000\:05\:00.0/max_vfs
   bind two of the VFs to the DPDK driver::
    ./usertools/dpdk-devbind.py -b vfio-pci 05:02.0 05:05.7
2. set VF max queue number to 16 by PF::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 \
    -w 05:00.0,queue-num-per-vf=16 --file-prefix=test1 \
    --socket-mem 1024,1024 -- -i
   PF port failed to start with "i40e_pf_parameter_init():
   Failed to allocate 577 queues, which exceeds the hardware maximum 384"
3. set VF max queue number to 8 by PF::
   testpmd can be started normally without any warning or error.
4. start the two VFs testpmd with "--rxq=8 --txq=8" and "--rxq=6 --txq=6"::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf0 -n 4 -w 05:02.0 \
    --file-prefix=test2 --socket-mem 1024,1024 -- -i --rxq=8 --txq=8
    ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf00 -n 4 -w 05:05.7 \
    --file-prefix=test3 --socket-mem 1024,1024 -- -i --rxq=6 --txq=6
   check the Max possible RX queues and TX queues of the two VFs are both 8::
   start forwarding, you can see the actual queue number
   VF0 is 8, VF1 is 6.

Test case 7: pass through VF to VM
==================================
1. bind the pf to dpdk driver, then create 1 vf from pf,
   Detach VF from the host, bind VF to pci-stub driver,
   Launch the VM with the VF PCI passthrough::
    taskset -c 5-20 qemu-system-x86_64 \
    -enable-kvm -m 8192 -smp cores=16,sockets=1 -cpu host -name dpdk1-vm1 \
    -drive file=/home/VM/ubuntu-14.04.img \
    -device pci-assign,host=0000:05:02.0 \
    -netdev tap,id=ipvm1,ifname=tap3,script=/etc/qemu-ifup -device rtl8139,netdev=ipvm1,id=net0,mac=00:00:00:00:00:01 \
    -localtime -vnc :2 -daemonize
2. set VF Max possible RX queues and TX queues to 8 by PF::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 \
    -w 05:00.0,queue-num-per-vf=8 --file-prefix=test1 \
    --socket-mem 1024,1024 -- -i
   testpmd can be started normally without any warning or error.
3. start VF testpmd with "--rxq=6 --txq=6", the number of rxq and txq is
   consistent with the configured VF max queue number,
   check the Max possible RX queues and TX queues is 8,
   start forwarding, you can see the actual queue number is 6.
   modify the queue number of VF::
    testpmd> port config all rxq 8
    testpmd> port config all txq 8
   start forwarding, you can see the VF1 actual queue number is 8.
4. repeat step2-3 with "queue-num-per-vf=1/2/4/16", and start VF testpmd
   with consistent rxq and txq number. check the max queue num and actual
   queue number is 1/2/4/16.
5. bind VF to kernel driver i40evf, check the rxq and txq number.
   if set VF Max possible RX queues and TX queues to 2 by PF,
   the VF rxq and txq number is 2::
    #ethtool -S eth0
   try to set VF Max possible RX queues and TX queues to 1/4/8/16 by PF,
   the VF rxq and txq number is 1/4/8/16::

                                                                         
- Case: 
  Description: l2fwd_crypto with SHA224_HMAC authentication test
  Command / instruction:
    Start l2fwd_crypto with QAT technology enable.
    Authentication method is SHA224_HMAC; the auth key is also supplied.
    Authentication key length for SHA224_HMAC should be 64 bytes.
      ./examples/l2fwd-crypto/build/app/l2fwd-crypto -c0xf -n4 -- -p0x1 \
        --chain HASH_ONLY --cdev_type ANY --auth_algo SHA224_HMAC \
        --auth_op GENERATE --auth_key $auth_key

    Send 65 packets with random 64 bytes payload and capture forwarded packets.
    Check that all forwarded packets contain a 28-byte hash value.
    Check hash values are same as automatic calculated value.

-----Original Message-----
From: Dai, Wei 
Sent: Friday, December 8, 2017 9:54 AM
To: Ananyev, Konstantin <konstantin.ananyev at intel.com>; Wu, Jingjing <jingjing.wu at intel.com>; Xing, Beilei <beilei.xing at intel.com>; Peng, Yuan <yuan.peng at intel.com>
Cc: dev at dpdk.org; Dai, Wei <wei.dai at intel.com>
Subject: [PATCH v7] net/i40e: determine number of queues per VF during run time

Without this patch, the number of queues per i40e VF is defined as 4 by CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4 in config/common_base.
It is a fixed value determined at build time and can't be changed during run time.
With this patch, the number of queues per i40e VF can be determined during run time. For example, if the PCI address of an i40e PF is aaaa:bb.cc, with the EAL parameter -w aaaa:bb.cc,queue-num-per-vf=8 , the number of queues per VF created from this PF is 8.
If there is no "queue-num-per-vf" setting in EAL parameters, it is 4 by default as before. And if the value after the "queue-num-per-vf"
is invalid, it is set as 4 forcibly. The valid values include 1, 2, 4, 8, 16 .

Signed-off-by: Wei Dai <wei.dai at intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev at intel.com>

---
v7:
    use the macro instead of natural number
    correct git log message as the EAL parameter is only valid for PF
v6:
    fix a small bug when detecting end character of strtoul
v5:
    fix git log message and WARNING of coding style
v4:
    use rte_kvargs instead of previous parsing function;
    use malloc/free instead of rte_zmalloc/rte_free.
v3:
    fix WARNING of coding style issues from checkpatch at dpdk.org
v2:
    fix WARNING of coding style issues from checkpatch at dpdk.org
---
 config/common_base             |  1 -
 drivers/net/i40e/i40e_ethdev.c | 67 ++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/config/common_base b/config/common_base index e74febe..4e20389 100644
--- a/config/common_base
+++ b/config/common_base
@@ -208,7 +208,6 @@ CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
 CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
-CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
 # interval up to 8160 us, aligned to 2 (or default value)
 CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL=-1
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 811cc9f..9295e1b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3971,6 +3971,67 @@ i40e_get_cap(struct i40e_hw *hw)
 	return ret;
 }
 
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
+#define QUEUE_NUM_PER_VF_ARG			"queue-num-per-vf"
+static int i40e_pf_parse_vf_queue_number_handler(const char *key,
+		const char *value,
+		void *opaque)
+{
+	struct i40e_pf *pf;
+	unsigned long num;
+	char *end;
+
+	pf = (struct i40e_pf *)opaque;
+	RTE_SET_USED(key);
+
+	errno = 0;
+	num = strtoul(value, &end, 0);
+	if (errno != 0 || end == value || *end != 0) {
+		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
+			    "kept the value = %hu", value, pf->vf_nb_qp_max);
+		return -(EINVAL);
+	}
+
+	if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
+		pf->vf_nb_qp_max = (uint16_t)num;
+	else
+		/* here return 0 to make next valid same argument work */
+		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
+			    "power of 2 and equal or less than 16 !, Now it is "
+			    "kept the value = %hu", num, pf->vf_nb_qp_max);
+
+	return 0;
+}
+
+static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev) {
+	static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, ""};
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_kvargs *kvlist;
+
+	/* set default queue number per VF as 4 */
+	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+	if (dev->device->devargs == NULL)
+		return 0;
+
+	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+	if (kvlist == NULL)
+		return -(EINVAL);
+
+	if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
+		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+			    "the first invalid or last valid one is used !",
+			    QUEUE_NUM_PER_VF_ARG);
+
+	rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
+			   i40e_pf_parse_vf_queue_number_handler, pf);
+
+	rte_kvargs_free(kvlist);
+
+	return 0;
+}
+
 static int
 i40e_pf_parameter_init(struct rte_eth_dev *dev)  { @@ -3983,6 +4044,9 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
 		return -EINVAL;
 	}
+
+	i40e_pf_config_vf_rxq_number(dev);
+
 	/* Add the parameter init for LFC */
 	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER; @@ -3992,7 +4056,6 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 	pf->max_num_vsi = hw->func_caps.num_vsis;
 	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
 	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
-	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
 
 	/* FDir queue/VSI allocation */
 	pf->fdir_qp_offset = 0;
@@ -4022,7 +4085,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
 	if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
 		pf->flags |= I40E_FLAG_SRIOV;
-		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+		pf->vf_nb_qps = pf->vf_nb_qp_max;
 		pf->vf_num = pci_dev->max_vfs;
 		PMD_DRV_LOG(DEBUG,
 			"%u VF VSIs, %u queues per VF VSI, in total %u queues",
--
2.7.5



More information about the dev mailing list