[dpdk-dev,v4,3/3] net/i40e: enable runtime queue setup
Checks
Commit Message
Expose the runtime queue configuration capability and enhance
i40e_dev_[rx|tx]_queue_setup to handle the situation when the
device is already started.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
v4:
- fix rx/tx conflict check.
- no conflict check needed for the first rx/tx queue at runtime setup.
v3:
- no queue start/stop in setup/release
- return failure when the required rx/tx function conflicts with the
existing setup
drivers/net/i40e/i40e_ethdev.c | 4 +
drivers/net/i40e/i40e_rxtx.c | 195 ++++++++++++++++++++++++++++++++++++-----
2 files changed, 176 insertions(+), 23 deletions(-)
Comments
> +
> +static int
> +i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
> + struct i40e_rx_queue *rxq)
> +{
> + struct i40e_adapter *ad =
> + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> + int use_def_burst_func =
> + check_rx_burst_bulk_alloc_preconditions(rxq);
> + uint16_t buf_size =
> + (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
> + RTE_PKTMBUF_HEADROOM);
> + int use_scattered_rx =
> + ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) ?
> + 1 : 0;
As a nit:
int use_scattered_rx = ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
would do exactly the same.
> +
> + if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
> + PMD_DRV_LOG(ERR,
> + "Failed to do RX queue initialization");
> + return -EINVAL;
> + }
> +
> + if (i40e_dev_first_rx_queue(dev, rxq->queue_id)) {
> + /**
> + * If it is the first queue to setup,
> + * set all flags to default and call
> + * i40e_set_rx_function.
> + */
> + ad->rx_bulk_alloc_allowed = true;
> + ad->rx_vec_allowed = true;
> + dev->data->scattered_rx = use_scattered_rx;
> + if (use_def_burst_func)
> + ad->rx_bulk_alloc_allowed = false;
> + i40e_set_rx_function(dev);
> + return 0;
> + }
> +
> + /* check bulk alloc conflict */
> + if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
> + PMD_DRV_LOG(ERR, "Can't use default burst.");
> + return -EINVAL;
> + }
> + /* check scatterred conflict */
> + if (!dev->data->scattered_rx && use_scattered_rx) {
> + PMD_DRV_LOG(ERR, "Scattered rx is required.");
> + return -EINVAL;
> + }
> + /* check vector conflict */
> + if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
> + PMD_DRV_LOG(ERR, "Failed vector rx setup.");
> + return -EINVAL;
> + }
> +
> + return 0;
> +}
...
> +
> +static int
> +i40e_dev_first_tx_queue(struct rte_eth_dev *dev,
> + uint16_t queue_idx)
> +{
> + uint16_t i;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + if (i != queue_idx && dev->data->rx_queues[i])
> + return 0;
> + }
> +
> + return 1;
> +}
I suppose it should be tx_queues and nb_tx_queues here.
BTW you probably can merge i40e_dev_first_tx_queue() and i40e_dev_first_rx_queue()
into one function.
Konstantin
> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Sunday, April 1, 2018 8:18 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; thomas@monjalon.net
> Cc: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: RE: [PATCH v4 3/3] net/i40e: enable runtime queue setup
>
>
>
> > +
> > +static int
> > +i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
> > + struct i40e_rx_queue *rxq)
> > +{
> > + struct i40e_adapter *ad =
> > + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> > + int use_def_burst_func =
> > + check_rx_burst_bulk_alloc_preconditions(rxq);
> > + uint16_t buf_size =
> > + (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
> > + RTE_PKTMBUF_HEADROOM);
> > + int use_scattered_rx =
> > + ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) ?
> > + 1 : 0;
>
> As a nit:
> int use_scattered_rx = ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) >
> buf_size); would do exactly the same.
>
> > +
> > + if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
> > + PMD_DRV_LOG(ERR,
> > + "Failed to do RX queue initialization");
> > + return -EINVAL;
> > + }
> > +
> > + if (i40e_dev_first_rx_queue(dev, rxq->queue_id)) {
> > + /**
> > + * If it is the first queue to setup,
> > + * set all flags to default and call
> > + * i40e_set_rx_function.
> > + */
> > + ad->rx_bulk_alloc_allowed = true;
> > + ad->rx_vec_allowed = true;
> > + dev->data->scattered_rx = use_scattered_rx;
> > + if (use_def_burst_func)
> > + ad->rx_bulk_alloc_allowed = false;
> > + i40e_set_rx_function(dev);
> > + return 0;
> > + }
> > +
> > + /* check bulk alloc conflict */
> > + if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
> > + PMD_DRV_LOG(ERR, "Can't use default burst.");
> > + return -EINVAL;
> > + }
> > + /* check scatterred conflict */
> > + if (!dev->data->scattered_rx && use_scattered_rx) {
> > + PMD_DRV_LOG(ERR, "Scattered rx is required.");
> > + return -EINVAL;
> > + }
> > + /* check vector conflict */
> > + if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
> > + PMD_DRV_LOG(ERR, "Failed vector rx setup.");
> > + return -EINVAL;
> > + }
> > +
> > + return 0;
> > +}
>
> ...
>
> > +
> > +static int
> > +i40e_dev_first_tx_queue(struct rte_eth_dev *dev,
> > + uint16_t queue_idx)
> > +{
> > + uint16_t i;
> > +
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > + if (i != queue_idx && dev->data->rx_queues[i])
> > + return 0;
> > + }
> > +
> > + return 1;
> > +}
>
> I suppose it should be tx_queues and nb_tx_queues here.
> BTW you probably can merge i40e_dev_first_tx_queue() and
> i40e_dev_first_rx_queue() into one function.
Thanks for capturing this, will fix.
> Konstantin
@@ -3197,6 +3197,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ dev_info->runtime_queue_setup_capa =
+ DEV_RUNTIME_RX_QUEUE_SETUP |
+ DEV_RUNTIME_TX_QUEUE_SETUP;
+
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
@@ -1692,6 +1692,75 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_dev_first_rx_queue(struct rte_eth_dev *dev,
+ uint16_t queue_idx)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i != queue_idx && dev->data->rx_queues[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_rx_queue *rxq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ uint16_t buf_size =
+ (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ int use_scattered_rx =
+ ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) ?
+ 1 : 0;
+
+ if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_rx_queue(dev, rxq->queue_id)) {
+ /**
+ * If it is the first queue to setup,
+ * set all flags to default and call
+ * i40e_set_rx_function.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ dev->data->scattered_rx = use_scattered_rx;
+ if (use_def_burst_func)
+ ad->rx_bulk_alloc_allowed = false;
+ i40e_set_rx_function(dev);
+ return 0;
+ }
+
+ /* check bulk alloc conflict */
+ if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+ PMD_DRV_LOG(ERR, "Can't use default burst.");
+ return -EINVAL;
+ }
+ /* check scatterred conflict */
+ if (!dev->data->scattered_rx && use_scattered_rx) {
+ PMD_DRV_LOG(ERR, "Scattered rx is required.");
+ return -EINVAL;
+ }
+ /* check vector conflict */
+ if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+ PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1808,25 +1877,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
i40e_reset_rx_queue(rxq);
rxq->q_set = TRUE;
- dev->data->rx_queues[queue_idx] = rxq;
-
- use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
- if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
- } else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
- "not enabled on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
- ad->rx_bulk_alloc_allowed = false;
- }
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -1841,6 +1891,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->dcb_tc = i;
}
+ if (dev->data->dev_started) {
+ if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
+ i40e_dev_rx_queue_release(rxq);
+ return -EINVAL;
+ }
+ } else {
+ use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+ }
+
+ dev->data->rx_queues[queue_idx] = rxq;
return 0;
}
@@ -1972,6 +2050,67 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_dev_first_tx_queue(struct rte_eth_dev *dev,
+ uint16_t queue_idx)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i != queue_idx && dev->data->rx_queues[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (i40e_tx_queue_init(txq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do TX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_tx_queue(dev, txq->queue_id)) {
+ /**
+ * If it is the first queue to setup,
+ * set all flags to default and call
+ * i40e_set_tx_function.
+ */
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+ i40e_set_tx_function_flag(dev, txq);
+ i40e_set_tx_function(dev);
+ return 0;
+ }
+
+ /* check vector conflict */
+ if (ad->tx_vec_allowed) {
+ if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ ||
+ i40e_txq_vec_setup(txq)) {
+ PMD_DRV_LOG(ERR, "Failed vector tx setup.");
+ return -EINVAL;
+ }
+ }
+ /* check simple tx conflict */
+ if (ad->tx_simple_allowed) {
+ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) !=
+ I40E_SIMPLE_FLAGS) ||
+ (txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST)) {
+ }
+ PMD_DRV_LOG(ERR, "No-simple tx is required.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2144,10 +2283,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
i40e_reset_tx_queue(txq);
txq->q_set = TRUE;
- dev->data->tx_queues[queue_idx] = txq;
-
- /* Use a simple TX queue without offloads or multi segs if possible */
- i40e_set_tx_function_flag(dev, txq);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -2162,6 +2297,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->dcb_tc = i;
}
+ if (dev->data->dev_started) {
+ if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
+ i40e_dev_tx_queue_release(txq);
+ return -EINVAL;
+ }
+ } else {
+ /**
+ * Use a simple TX queue without offloads or
+ * multi segs if possible
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+
return 0;
}