[dpdk-dev] [PATCH v2] net/af_packet: remove limitation on number of qpairs

John W. Linville linville at tuxdriver.com
Fri Feb 28 17:52:34 CET 2020


On Fri, Feb 28, 2020 at 10:08:43AM +0000, Ferruh Yigit wrote:
> On 2/27/2020 8:00 PM, Stephen Hemminger wrote:
> > Since qpairs is part of the vdev arguments, there is no need to
> > limit it to 16. The queue arrays can be dynamically sized based
> > on the requested parameters.
> > 
> > Signed-off-by: Stephen Hemminger <stephen at networkplumber.org>
> > ---
> >  drivers/net/af_packet/rte_eth_af_packet.c | 23 +++++++++++++++++------
> >  1 file changed, 17 insertions(+), 6 deletions(-)
> > 
> > diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
> > index f5806bf42c46..e5e0aa9277a8 100644
> > --- a/drivers/net/af_packet/rte_eth_af_packet.c
> > +++ b/drivers/net/af_packet/rte_eth_af_packet.c
> > @@ -37,8 +37,6 @@
> >  #define DFLT_FRAME_SIZE		(1 << 11)
> >  #define DFLT_FRAME_COUNT	(1 << 9)
> >  
> > -#define RTE_PMD_AF_PACKET_MAX_RINGS 16
> > -
> >  struct pkt_rx_queue {
> >  	int sockfd;
> >  
> > @@ -77,8 +75,8 @@ struct pmd_internals {
> >  
> >  	struct tpacket_req req;
> >  
> > -	struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
> > -	struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
> > +	struct pkt_rx_queue *rx_queue;
> > +	struct pkt_tx_queue *tx_queue;
> >  };
> >  
> >  static const char *valid_arguments[] = {
> > @@ -601,6 +599,18 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
> >  	if (*internals == NULL)
> >  		return -1;
> >  
> > +
> > +	(*internals)->rx_queue = rte_calloc_socket("af_packet_rx",
> > +						nb_queues,
> > +						sizeof(struct pkt_rx_queue),
> > +						0, numa_node);
> > +	(*internals)->tx_queue = rte_calloc_socket("af_packet_tx",
> > +						nb_queues,
> > +						sizeof(struct pkt_tx_queue),
> > +						0, numa_node);
> 
> Not for this patch, but right now all queue initialization is done during
> init, based on the maximum number of queues the PMD can support. In the
> future we may want to move allocating and configuring queues into
> 'eth_rx_queue_setup' & 'eth_tx_queue_setup', based on the number of queues
> the application requests...
> 
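
For reference, a rough sketch of what allocating in the setup callback
might look like (illustrative only -- the AF_PACKET socket/mmap setup
and the matching teardown would have to move along with it):

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pkt_rx_queue *rxq;

	/* Allocate per-queue state only for queues the application
	 * actually configures, on the socket it asks for.
	 */
	rxq = rte_zmalloc_socket("af_packet_rxq", sizeof(*rxq),
				 0, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	rxq->map = MAP_FAILED;
	rxq->mb_pool = mb_pool;
	rxq->in_port = dev->data->port_id;

	dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
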
> > +	if (!(*internals)->rx_queue || !(*internals)->tx_queue)
> > +		return -1;
> 
> If only one allocation fails, should we free the other?

Yeah, good catch.
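
FWIW, something along these lines would cover it -- rte_free() is a
no-op on a NULL pointer, so freeing both unconditionally also handles
the case where only one of the two allocations succeeded:

	(*internals)->rx_queue = rte_calloc_socket("af_packet_rx",
						nb_queues,
						sizeof(struct pkt_rx_queue),
						0, numa_node);
	(*internals)->tx_queue = rte_calloc_socket("af_packet_tx",
						nb_queues,
						sizeof(struct pkt_tx_queue),
						0, numa_node);
	if (!(*internals)->rx_queue || !(*internals)->tx_queue) {
		/* rte_free(NULL) is safe, so this frees whichever
		 * allocation (if either) actually succeeded.
		 */
		rte_free((*internals)->rx_queue);
		rte_free((*internals)->tx_queue);
		return -1;
	}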

> > +
> >  	for (q = 0; q < nb_queues; q++) {
> >  		(*internals)->rx_queue[q].map = MAP_FAILED;
> >  		(*internals)->tx_queue[q].map = MAP_FAILED;
> > @@ -846,8 +856,7 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
> >  		pair = &kvlist->pairs[k_idx];
> >  		if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
> >  			qpairs = atoi(pair->value);
> > -			if (qpairs < 1 ||
> > -			    qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
> > +			if (qpairs < 1) {
> >  				PMD_LOG(ERR,
> >  					"%s: invalid qpairs value",
> >  				        name);
> > @@ -1019,6 +1028,8 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
> >  		rte_free(internals->tx_queue[q].rd);
> >  	}
> >  	free(internals->if_name);
> > +	rte_free(internals->rx_queue);
> > +	rte_free(internals->tx_queue);
> >  
> >  	rte_eth_dev_release_port(eth_dev);
> >  
> > 
> 
> 

-- 
John W. Linville		Someday the world will need a hero, and you
linville at tuxdriver.com			might be all we have.  Be ready.