[v6,1/6] ethdev: allocate max space for internal queue array
Checks
Commit Message
At queue configure stage always allocate space for maximum possible
number (RTE_MAX_QUEUES_PER_PORT) of queue pointers.
That will allow 'fast' inline functions (eth_rx_burst, etc.) to refer
to internal queue data pointers without extra checking of the current
number of configured queues.
That would help in the future to hide rte_eth_dev and related structures.
It means that from now on, each ethdev port will always consume:
((2*sizeof(uintptr_t))* RTE_MAX_QUEUES_PER_PORT)
bytes of memory for its queue pointers.
With RTE_MAX_QUEUES_PER_PORT==1024 (default value) it is 16KB per port.
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
lib/ethdev/rte_ethdev.c | 38 +++++---------------------------------
1 file changed, 5 insertions(+), 33 deletions(-)
Comments
On 10/13/21 4:36 PM, Konstantin Ananyev wrote:
> At queue configure stage always allocate space for maximum possible
> number (RTE_MAX_QUEUES_PER_PORT) of queue pointers.
> That will allow 'fast' inline functions (eth_rx_burst, etc.) to refer
> pointer to internal queue data without extra checking of current number
> of configured queues.
> That would help in future to hide rte_eth_dev and related structures.
> It means that from now on, each ethdev port will always consume:
> ((2*sizeof(uintptr_t))* RTE_MAX_QUEUES_PER_PORT)
> bytes of memory for its queue pointers.
> With RTE_MAX_QUEUES_PER_PORT==1024 (default value) it is 16KB per port.
>
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
@@ -919,12 +919,12 @@ static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
uint16_t old_nb_queues = dev->data->nb_rx_queues;
- void **rxq;
unsigned i;
if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
- sizeof(dev->data->rx_queues[0]) * nb_queues,
+ sizeof(dev->data->rx_queues[0]) *
+ RTE_MAX_QUEUES_PER_PORT,
RTE_CACHE_LINE_SIZE);
if (dev->data->rx_queues == NULL) {
dev->data->nb_rx_queues = 0;
@@ -934,20 +934,6 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
for (i = nb_queues; i < old_nb_queues; i++)
eth_dev_rxq_release(dev, i);
- rxq = dev->data->rx_queues;
- rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
- if (rxq == NULL)
- return -(ENOMEM);
- if (nb_queues > old_nb_queues) {
- uint16_t new_qs = nb_queues - old_nb_queues;
-
- memset(rxq + old_nb_queues, 0,
- sizeof(rxq[0]) * new_qs);
- }
-
- dev->data->rx_queues = rxq;
-
} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
for (i = nb_queues; i < old_nb_queues; i++)
eth_dev_rxq_release(dev, i);
@@ -1153,13 +1139,13 @@ static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
uint16_t old_nb_queues = dev->data->nb_tx_queues;
- void **txq;
unsigned i;
if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
- sizeof(dev->data->tx_queues[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
+ sizeof(dev->data->tx_queues[0]) *
+ RTE_MAX_QUEUES_PER_PORT,
+ RTE_CACHE_LINE_SIZE);
if (dev->data->tx_queues == NULL) {
dev->data->nb_tx_queues = 0;
return -(ENOMEM);
@@ -1168,20 +1154,6 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
for (i = nb_queues; i < old_nb_queues; i++)
eth_dev_txq_release(dev, i);
- txq = dev->data->tx_queues;
- txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
- if (txq == NULL)
- return -ENOMEM;
- if (nb_queues > old_nb_queues) {
- uint16_t new_qs = nb_queues - old_nb_queues;
-
- memset(txq + old_nb_queues, 0,
- sizeof(txq[0]) * new_qs);
- }
-
- dev->data->tx_queues = txq;
-
} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
for (i = nb_queues; i < old_nb_queues; i++)
eth_dev_txq_release(dev, i);