@@ -112,19 +112,6 @@ Deprecation Notices
The new API adds an ``rss_level`` field to ``rte_eth_rss_conf`` to enable a
choice of RSS hash calculation on the outer or inner header of tunneled packets.
-* ethdev: Currently, if the rte_eth_rx_burst() function returns a value less
- than *nb_pkts*, the application will assume that no more packets are present.
- Some of the hw queue based hardware can only support smaller burst for RX
- and TX and thus break the expectation of the rx_burst API. Similar is the
- case for TX burst as well as ring sizes. ``rte_eth_dev_info`` will be added
- with following new parameters so as to support semantics for drivers to
- define a preferred size for Rx/Tx burst and rings.
-
- - Member ``struct preferred_size`` would be added to enclose all preferred
- size to be fetched from driver/implementation.
- - Members ``uint16_t rx_burst``, ``uint16_t tx_burst``, ``uint16_t rx_ring``,
- and ``uint16_t tx_ring`` would be added to ``struct preferred_size``.
-
* ethdev: Work is planned for 18.05 to expose VF port representors
  as a means to perform control and data path operations on the different VFs.
  As a VF representor is an ethdev port, new fields are needed in order to map
@@ -58,6 +58,11 @@ New Features
* Added support for NVGRE, VXLAN and GENEVE filters in flow API.
* Added support for DROP action in flow API.
+* **Added PMD-recommended Tx and Rx parameters.**
+
+ Applications can now query drivers for device-tuned values for
+ ring sizes, burst sizes, and the number of queues.
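+
+ A minimal usage sketch (assuming ``port_id`` refers to an already-probed
+ port; a value of zero in any field means the PMD expressed no preference):
+
+ .. code-block:: c
+
+    struct rte_eth_dev_info dev_info;
+
+    rte_eth_dev_info_get(port_id, &dev_info);
+    /* Device-preferred burst and ring sizes (zero if none given) */
+    uint16_t rx_burst = dev_info.default_rxportconf.burst_size;
+    uint16_t tx_ring  = dev_info.default_txportconf.ring_size;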
+
API Changes
-----------
@@ -72,6 +77,29 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Changes to semantics of rte_eth_dev_configure() parameters.**
+
+ If both the ``nb_rx_q`` and ``nb_tx_q`` parameters are zero,
+ ``rte_eth_dev_configure`` will now use the PMD-recommended numbers of Rx
+ and Tx queues, or, if the PMD does not provide a recommendation, ethdev
+ fall-back values. Previously, setting both parameters to zero would have
+ resulted in ``-EINVAL`` being returned.
+
+* **Changes to semantics of rte_eth_rx_queue_setup() parameters.**
+
+ If the ``nb_rx_desc`` parameter is zero, ``rte_eth_rx_queue_setup`` will
+ now use the PMD-recommended Rx ring size, or in the case where the PMD
+ does not provide a recommendation, will use an ethdev-provided
+ fall-back value. Previously, setting ``nb_rx_desc`` to zero would have
+ resulted in an error.
+
+* **Changes to semantics of rte_eth_tx_queue_setup() parameters.**
+
+ If the ``nb_tx_desc`` parameter is zero, ``rte_eth_tx_queue_setup`` will
+ now use the PMD-recommended Tx ring size, or in the case where the PMD
+ does not provide a recommendation, will use an ethdev-provided
+ fall-back value. Previously, setting ``nb_tx_desc`` to zero would have
+ resulted in an error.
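+
+ A combined sketch of the three changes above (``port_conf``, ``socket_id``,
+ and ``mp`` are assumed to be set up elsewhere by the application):
+
+ .. code-block:: c
+
+    /* Zero queue counts: use PMD-recommended (or fall-back) values */
+    rte_eth_dev_configure(port_id, 0, 0, &port_conf);
+
+    /* Zero descriptor counts: use PMD-recommended ring sizes */
+    rte_eth_rx_queue_setup(port_id, 0, 0, socket_id, NULL, mp);
+    rte_eth_tx_queue_setup(port_id, 0, 0, socket_id, NULL);
+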
ABI Changes
-----------
@@ -86,6 +114,13 @@ ABI Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Additional fields in rte_eth_dev_info.**
+
+ The ``rte_eth_dev_info`` structure has had two new fields appended to it:
+ ``default_rxportconf`` and ``default_txportconf``. Each is an
+ ``rte_eth_dev_portconf`` structure containing three ``uint16_t`` fields:
+ ``burst_size``, ``ring_size``, and ``nb_queues``. These are parameter
+ values recommended for use by the PMD.
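+
+ A sketch of how an application might consume these fields; the zero-check
+ mirrors the fall-back logic used inside ethdev (``app_rx_burst`` is a
+ hypothetical application variable):
+
+ .. code-block:: c
+
+    struct rte_eth_dev_info dev_info;
+
+    rte_eth_dev_info_get(port_id, &dev_info);
+    uint16_t app_rx_burst = dev_info.default_rxportconf.burst_size;
+    if (app_rx_burst == 0) /* PMD made no recommendation */
+            app_rx_burst = 32; /* application-chosen fall-back */
+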
Removed Items
-------------
@@ -1061,6 +1061,26 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+
+ /* If the number of queues specified by the application for both Rx
+  * and Tx is zero, use driver-preferred values. This cannot be done
+  * individually, as it is valid for either Rx or Tx (but not both) to
+  * be zero. If the driver does not provide any preferred values, fall
+  * back on EAL defaults.
+  */
+ if (nb_rx_q == 0 && nb_tx_q == 0) {
+ nb_rx_q = dev_info.default_rxportconf.nb_queues;
+ if (nb_rx_q == 0)
+ nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
+ nb_tx_q = dev_info.default_txportconf.nb_queues;
+ if (nb_tx_q == 0)
+ nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
+ }
+
if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
RTE_PMD_DEBUG_TRACE(
"Number of RX queues requested (%u) is greater than max supported(%d)\n",
@@ -1075,8 +1095,6 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return -EINVAL;
}
- dev = &rte_eth_devices[port_id];
-
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
@@ -1106,13 +1124,6 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
* than the maximum number of RX and TX queues supported by the
* configured device.
*/
- (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
-
- if (nb_rx_q == 0 && nb_tx_q == 0) {
- RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
- return -EINVAL;
- }
-
if (nb_rx_q > dev_info.max_rx_queues) {
RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
port_id, nb_rx_q, dev_info.max_rx_queues);
@@ -1477,6 +1488,14 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
return -EINVAL;
}
+ /* Use the default specified by the driver if nb_rx_desc is zero */
+ if (nb_rx_desc == 0) {
+ nb_rx_desc = dev_info.default_rxportconf.ring_size;
+ /* If driver default is also zero, fall back on EAL default */
+ if (nb_rx_desc == 0)
+ nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
+ }
+
if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
@@ -1600,6 +1619,14 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
rte_eth_dev_info_get(port_id, &dev_info);
+ /* Use the default specified by the driver if nb_tx_desc is zero */
+ if (nb_tx_desc == 0) {
+ nb_tx_desc = dev_info.default_txportconf.ring_size;
+ /* If driver default is zero, fall back on EAL default */
+ if (nb_tx_desc == 0)
+ nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
+ }
+
if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
@@ -988,6 +988,27 @@ struct rte_eth_conf {
struct rte_pci_device;
+/*
+ * Fallback default preferred Rx/Tx port parameters.
+ * These are used if an application requests default parameters
+ * but the PMD does not provide preferred values.
+ */
+#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
+#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
+#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
+#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
+
+/**
+ * Preferred Rx/Tx port parameters.
+ * There are separate instances of this structure for transmission
+ * and reception respectively.
+ */
+struct rte_eth_dev_portconf {
+ uint16_t burst_size; /**< Device-preferred burst size */
+ uint16_t ring_size; /**< Device-preferred size of queue rings */
+ uint16_t nb_queues; /**< Device-preferred number of queues */
+};
+
/**
* Ethernet device information
*/
@@ -1029,6 +1050,10 @@ struct rte_eth_dev_info {
/** Configured number of rx/tx queues */
uint16_t nb_rx_queues; /**< Number of RX queues. */
uint16_t nb_tx_queues; /**< Number of TX queues. */
+ /** Rx parameter recommendations */
+ struct rte_eth_dev_portconf default_rxportconf;
+ /** Tx parameter recommendations */
+ struct rte_eth_dev_portconf default_txportconf;
};
/**