[dpdk-dev,RFC,4/4] ethdev: add helpers to move to the new offloads API

Message ID 1e9c5ead0fbd92a3e7fb4c76397cb3dc369eca89.1502096064.git.shahafs@mellanox.com (mailing list archive)
State RFC, archived
Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues

Commit Message

Shahaf Shuler Aug. 7, 2017, 10:54 a.m. UTC
  A new offloads API was introduced by commits:

commit 8b07fcae6061 ("ethdev: introduce Tx queue offloads API")
commit c6504557763e ("ethdev: introduce Rx queue offloads API")

In order to enable PMDs to support only one of the APIs,
and applications to avoid branching according to the underlying device,
copy functions to/from the old/new APIs were added.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
 lib/librte_ether/rte_ethdev.c | 140 +++++++++++++++++++++++++++++++++++++
 1 file changed, 140 insertions(+)
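
For illustration, a minimal sketch (not part of the patch) of an application that
uses only the new per-queue offloads API, assuming this RFC's struct and field
names (rxmode.ignore, rte_eth_rxq_conf.offloads, rte_eth_txq_conf.offloads).
The conversion helpers added by this patch are what let such an application keep
working with a PMD that still implements the old rxmode/txq_flags API:

#include <rte_ethdev.h>

/* Sketch only: queue sizes, offload selection and error handling are
 * arbitrary; the struct/field names follow this RFC and may change.
 */
static int
setup_port_new_api(uint8_t port_id, unsigned int socket_id,
		   struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = {
		.rxmode = { .ignore = 1 },	/* old rxmode bit-fields not used */
	};
	struct rte_eth_rxq_conf rxq_conf = {
		.offloads = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_VLAN_STRIP,
	};
	struct rte_eth_txq_conf txq_conf = {
		.txq_flags = ETH_TXQ_FLAGS_IGNORE,	/* use .offloads instead */
		.offloads = DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_MULTI_SEGS,
	};

	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
		return -1;
	if (rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
				   &rxq_conf, mb_pool) < 0)
		return -1;
	if (rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, &txq_conf) < 0)
		return -1;
	return rte_eth_dev_start(port_id);
}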
  

Comments

Ananyev, Konstantin Aug. 23, 2017, 12:28 p.m. UTC | #1
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Shahaf Shuler
> Sent: Monday, August 7, 2017 1:55 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the new offloads API
> 
> A new offloads API was introduced by commits:
> 
> commit 8b07fcae6061 ("ethdev: introduce Tx queue offloads API")
> commit c6504557763e ("ethdev: introduce Rx queue offloads API")
> 
> In order to enable PMDs to support only one of the APIs,
> and applications to avoid branching according to the underlying device
> a copy functions to/from the old/new APIs were added.
> 
> Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> ---
>  lib/librte_ether/rte_ethdev.c | 140 +++++++++++++++++++++++++++++++++++++
>  1 file changed, 140 insertions(+)
> 
> diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
> index f73307e99..2b4a28c97 100644
> --- a/lib/librte_ether/rte_ethdev.c
> +++ b/lib/librte_ether/rte_ethdev.c
> @@ -1003,6 +1003,63 @@ rte_eth_dev_close(uint8_t port_id)
>  	dev->data->tx_queues = NULL;
>  }
> 
> +/**
> + * A copy function from rxmode offloads API to rte_eth_rxq_conf
> + * offloads API, to enable PMDs to support only one of the APIs.
> + */
> +static void
> +rte_eth_copy_rxmode_offloads(struct rte_eth_rxmode *rxmode,
> +			     struct rte_eth_rxq_conf *rxq_conf)
> +{
> +	if (rxmode->header_split == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
> +	if (rxmode->hw_ip_checksum == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
> +	if (rxmode->hw_vlan_filter == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
> +	if (rxmode->hw_vlan_strip == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +	if (rxmode->hw_vlan_extend == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
> +	if (rxmode->jumbo_frame == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
> +	if (rxmode->hw_strip_crc == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
> +	if (rxmode->enable_scatter == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_SCATTER;
> +	if (rxmode->enable_lro == 1)
> +		rxq_conf->offloads |= DEV_RX_OFFLOAD_LRO;
> +}
> +
> +/**
> + * A copy function between rte_eth_rxq_conf offloads API to rxmode
> + * offloads API, to enable application to be agnostic to the PMD supported
> + * offload API.
> + */
> +static void
> +rte_eth_copy_rxq_offloads(struct rte_eth_rxmode *rxmode,
> +			  struct rte_eth_rxq_conf *rxq_conf)
> +{
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
> +		rxmode->header_split = 1;
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
> +		rxmode->hw_ip_checksum = 1;
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
> +		rxmode->hw_vlan_filter = 1;
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
> +		rxmode->hw_vlan_strip = 1;
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
> +		rxmode->hw_vlan_extend = 1;
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
> +		rxmode->jumbo_frame = 1;
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
> +		rxmode->hw_strip_crc = 1;
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
> +		rxmode->enable_scatter = 1;
> +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_LRO)
> +		rxmode->enable_lro = 1;
> +}
> +
>  int
>  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
>  		       uint16_t nb_rx_desc, unsigned int socket_id,
> @@ -1083,6 +1140,37 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
>  	if (rx_conf == NULL)
>  		rx_conf = &dev_info.default_rxconf;
> 
> +	if ((dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD) &&
> +	    (dev->data->dev_conf.rxmode.ignore == 0)) {
> +		rte_eth_copy_rxmode_offloads(&dev->data->dev_conf.rxmode,
> +					     rx_conf);
> +	} else if ((!(dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD)) &&
> +		   (dev->data->dev_conf.rxmode.ignore == 1)) {
> +		int ret;
> +		struct rte_eth_rxmode rxmode;
> +
> +		rte_eth_copy_rxq_offloads(&rxmode, rx_conf);
> +		if (memcmp(&rxmode, &dev->data->dev_conf.rxmode,
> +			   sizeof(rxmode))) {
> +			/*
> +			 * device which work with rxmode offloads API requires
> +			 * a re-configuration in order to apply the new offloads
> +			 * configuration.
> +			 */
> +			dev->data->dev_conf.rxmode = rxmode;
> +			ret = rte_eth_dev_configure(port_id,
> +					dev->data->nb_rx_queues,
> +					dev->data->nb_tx_queues,
> +					&dev->data->dev_conf);


Hmm, and why would we need to reconfigure our device in the middle of rx queue setup?

> +			if (ret < 0) {
> +				RTE_PMD_DEBUG_TRACE(
> +					"unable to re-configure port %d "
> +					"in order to apply rxq offloads "
> +					"configuration\n", port_id);
> +			}
> +		}
> +	}
> +
>  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>  					      socket_id, rx_conf, mp);

BTW, I don't see changes in any PMD for the new offload flags?
Is it because it is just an RFC and the full patch would contain such changes?


>  	if (!ret) {
> @@ -1094,6 +1182,51 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
>  	return ret;
>  }
> 
> +/**
> + * A copy function from txq_flags to rte_eth_txq_conf offloads API,
> + * to enable PMDs to support only one of the APIs.
> + */
> +static void
> +rte_eth_copy_txq_flags(struct rte_eth_txq_conf *txq_conf)
> +{
> +	uint32_t txq_flags = txq_conf->txq_flags
> +	uint64_t *offloads = &txq_conf->offloads;
> +
> +	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
> +		*offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
> +	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
> +		*offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
> +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
> +		*offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
> +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
> +		*offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
> +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
> +		*offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
> +}
> +
> +/**
> + * A copy function between rte_eth_txq_conf offloads API to txq_flags
> + * offloads API, to enable application to be agnostic to the PMD supported
> + * API.
> + */
> +static void
> +rte_eth_copy_txq_offloads(struct rte_eth_txq_conf *txq_conf)
> +{
> +	uint32_t *txq_flags = &txq_conf->txq_flags
> +	uint64_t offloads = txq_conf->offloads;
> +
> +	if (!(offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
> +		*txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
> +	if (!(offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
> +		*txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
> +	if (!(offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
> +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
> +	if (!(offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
> +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
> +	if (!(offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
> +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
> +}
> +
>  int
>  rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
>  		       uint16_t nb_tx_desc, unsigned int socket_id,
> @@ -1145,6 +1278,13 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
>  	if (tx_conf == NULL)
>  		tx_conf = &dev_info.default_txconf;
> 
> +	if ((dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> +	    (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)))
> +		rte_eth_copy_txq_flags(tx_conf);
> +	else if (!(dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> +		   (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE))
> +		rte_eth_copy_txq_offloads(tx_conf);
> +

As I said in my previous mail - I think it is better to always convert from the old txq_flags
to the new TX offload flags and make each PMD understand the new offload values only.
Konstantin
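
A minimal sketch of that one-directional approach, reusing the helper and flag
names from this patch, could look as follows; PMDs would then read only
tx_conf->offloads:

	/* Always derive the new offload flags from the legacy txq_flags unless
	 * the application already filled tx_conf->offloads itself.
	 */
	if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE))
		rte_eth_copy_txq_flags(tx_conf);	/* old -> new only */

	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
					       socket_id, tx_conf);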

>  	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
>  					       socket_id, tx_conf);
>  }
> --
> 2.12.0
  
Shahaf Shuler Aug. 23, 2017, 1:13 p.m. UTC | #2
Wednesday, August 23, 2017 3:29 PM, Ananyev, Konstantin:
> 
> 
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Shahaf Shuler
> > Sent: Monday, August 7, 2017 1:55 PM
> > To: dev@dpdk.org
> > Subject: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the
> > new offloads API
> >
> > A new offloads API was introduced by commits:
> >
> > commit 8b07fcae6061 ("ethdev: introduce Tx queue offloads API") commit
> > c6504557763e ("ethdev: introduce Rx queue offloads API")
> >
> > In order to enable PMDs to support only one of the APIs, and
> > applications to avoid branching according to the underlying device a
> > copy functions to/from the old/new APIs were added.
> >
> > Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> > ---
> >  lib/librte_ether/rte_ethdev.c | 140
> > +++++++++++++++++++++++++++++++++++++
> >  1 file changed, 140 insertions(+)
> >
> > diff --git a/lib/librte_ether/rte_ethdev.c
> > b/lib/librte_ether/rte_ethdev.c index f73307e99..2b4a28c97 100644
> > --- a/lib/librte_ether/rte_ethdev.c
> > +++ b/lib/librte_ether/rte_ethdev.c
> > @@ -1003,6 +1003,63 @@ rte_eth_dev_close(uint8_t port_id)
> >  	dev->data->tx_queues = NULL;
> >  }
> >
> > +/**
> > + * A copy function from rxmode offloads API to rte_eth_rxq_conf
> > + * offloads API, to enable PMDs to support only one of the APIs.
> > + */
> > +static void
> > +rte_eth_copy_rxmode_offloads(struct rte_eth_rxmode *rxmode,
> > +			     struct rte_eth_rxq_conf *rxq_conf) {
> > +	if (rxmode->header_split == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
> > +	if (rxmode->hw_ip_checksum == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
> > +	if (rxmode->hw_vlan_filter == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
> > +	if (rxmode->hw_vlan_strip == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> > +	if (rxmode->hw_vlan_extend == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
> > +	if (rxmode->jumbo_frame == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
> > +	if (rxmode->hw_strip_crc == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
> > +	if (rxmode->enable_scatter == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_SCATTER;
> > +	if (rxmode->enable_lro == 1)
> > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_LRO; }
> > +
> > +/**
> > + * A copy function between rte_eth_rxq_conf offloads API to rxmode
> > + * offloads API, to enable application to be agnostic to the PMD
> > +supported
> > + * offload API.
> > + */
> > +static void
> > +rte_eth_copy_rxq_offloads(struct rte_eth_rxmode *rxmode,
> > +			  struct rte_eth_rxq_conf *rxq_conf) {
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
> > +		rxmode->header_split = 1;
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
> > +		rxmode->hw_ip_checksum = 1;
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
> > +		rxmode->hw_vlan_filter = 1;
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
> > +		rxmode->hw_vlan_strip = 1;
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
> > +		rxmode->hw_vlan_extend = 1;
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
> > +		rxmode->jumbo_frame = 1;
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
> > +		rxmode->hw_strip_crc = 1;
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
> > +		rxmode->enable_scatter = 1;
> > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_LRO)
> > +		rxmode->enable_lro = 1;
> > +}
> > +
> >  int
> >  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
> >  		       uint16_t nb_rx_desc, unsigned int socket_id, @@ -1083,6
> > +1140,37 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t
> rx_queue_id,
> >  	if (rx_conf == NULL)
> >  		rx_conf = &dev_info.default_rxconf;
> >
> > +	if ((dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD) &&
> > +	    (dev->data->dev_conf.rxmode.ignore == 0)) {
> > +		rte_eth_copy_rxmode_offloads(&dev->data-
> >dev_conf.rxmode,
> > +					     rx_conf);
> > +	} else if ((!(dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD))
> &&
> > +		   (dev->data->dev_conf.rxmode.ignore == 1)) {
> > +		int ret;
> > +		struct rte_eth_rxmode rxmode;
> > +
> > +		rte_eth_copy_rxq_offloads(&rxmode, rx_conf);
> > +		if (memcmp(&rxmode, &dev->data->dev_conf.rxmode,
> > +			   sizeof(rxmode))) {
> > +			/*
> > +			 * device which work with rxmode offloads API
> requires
> > +			 * a re-configuration in order to apply the new
> offloads
> > +			 * configuration.
> > +			 */
> > +			dev->data->dev_conf.rxmode = rxmode;
> > +			ret = rte_eth_dev_configure(port_id,
> > +					dev->data->nb_rx_queues,
> > +					dev->data->nb_tx_queues,
> > +					&dev->data->dev_conf);
> 
> 
> Hmm, and why we would need to reconfigure our device in the middle of rx
> queue setup?

The reason is that the old Rx offloads API is configured at device configure time.
This if-section is for applications which have already moved to the new offloads API while the underlying PMD still uses the old one.

> 
> > +			if (ret < 0) {
> > +				RTE_PMD_DEBUG_TRACE(
> > +					"unable to re-configure port %d "
> > +					"in order to apply rxq offloads "
> > +					"configuration\n", port_id);
> > +			}
> > +		}
> > +	}
> > +
> >  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id,
> nb_rx_desc,
> >  					      socket_id, rx_conf, mp);
> 
> BTW, I don't see changes in any PMD for new offload flags?
> Is it because it is just a n RFC and full patch would contain such changes?

Yes this is because this is an RFC.
 
The full patch I intend to submit will move all the examples and testpmd to the new offloads API.
In addition, it will include mlx5 PMD support for the new offloads API.

As I said in my previous mail, I believe that the work to move the different PMDs to the new API should be done by their developers or maintainers.

> 
> 
> >  	if (!ret) {
> > @@ -1094,6 +1182,51 @@ rte_eth_rx_queue_setup(uint8_t port_id,
> uint16_t rx_queue_id,
> >  	return ret;
> >  }
> >
> > +/**
> > + * A copy function from txq_flags to rte_eth_txq_conf offloads API,
> > + * to enable PMDs to support only one of the APIs.
> > + */
> > +static void
> > +rte_eth_copy_txq_flags(struct rte_eth_txq_conf *txq_conf) {
> > +	uint32_t txq_flags = txq_conf->txq_flags
> > +	uint64_t *offloads = &txq_conf->offloads;
> > +
> > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
> > +		*offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
> > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
> > +		*offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
> > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
> > +		*offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
> > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
> > +		*offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
> > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
> > +		*offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; }
> > +
> > +/**
> > + * A copy function between rte_eth_txq_conf offloads API to txq_flags
> > + * offloads API, to enable application to be agnostic to the PMD
> > +supported
> > + * API.
> > + */
> > +static void
> > +rte_eth_copy_txq_offloads(struct rte_eth_txq_conf *txq_conf) {
> > +	uint32_t *txq_flags = &txq_conf->txq_flags
> > +	uint64_t offloads = txq_conf->offloads;
> > +
> > +	if (!(offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
> > +		*txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
> > +	if (!(offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
> > +		*txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
> > +	if (!(offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
> > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
> > +	if (!(offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
> > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
> > +	if (!(offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
> > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMTCP; }
> > +
> >  int
> >  rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
> >  		       uint16_t nb_tx_desc, unsigned int socket_id, @@ -1145,6
> > +1278,13 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t
> tx_queue_id,
> >  	if (tx_conf == NULL)
> >  		tx_conf = &dev_info.default_txconf;
> >
> > +	if ((dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> > +	    (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)))
> > +		rte_eth_copy_txq_flags(tx_conf);
> > +	else if (!(dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> > +		   (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE))
> > +		rte_eth_copy_txq_offloads(tx_conf);
> > +
> 
> As I said in my previous mail - I think better to always convrert from old
> txq_flags to new TX offload flags and make each PMD to understand new
> offload values only.
> Konstantin
> 
> >  	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id,
> nb_tx_desc,
> >  					       socket_id, tx_conf);
> >  }
> > --
> > 2.12.0
  
Thomas Monjalon Aug. 23, 2017, 10:06 p.m. UTC | #3
23/08/2017 15:13, Shahaf Shuler:
> Wednesday, August 23, 2017 3:29 PM, Ananyev, Konstantin:
> > From: Shahaf Shuler
> > > In order to enable PMDs to support only one of the APIs, and
> > > applications to avoid branching according to the underlying device a
> > > copy functions to/from the old/new APIs were added.

Looks like a good intent.
I would prefer the word "convert" instead of "copy".

> > >  int
> > >  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
[...]
> > > +	} else if ((!(dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD)) &&
> > > +		   (dev->data->dev_conf.rxmode.ignore == 1)) {
> > > +		int ret;
> > > +		struct rte_eth_rxmode rxmode;
> > > +
> > > +		rte_eth_copy_rxq_offloads(&rxmode, rx_conf);
> > > +		if (memcmp(&rxmode, &dev->data->dev_conf.rxmode,
> > > +			   sizeof(rxmode))) {
> > > +			/*
> > > +			 * device which work with rxmode offloads API requires
> > > +			 * a re-configuration in order to apply the new offloads
> > > +			 * configuration.
> > > +			 */
> > > +			dev->data->dev_conf.rxmode = rxmode;
> > > +			ret = rte_eth_dev_configure(port_id,
> > > +					dev->data->nb_rx_queues,
> > > +					dev->data->nb_tx_queues,
> > > +					&dev->data->dev_conf);
> > 
> > Hmm, and why we would need to reconfigure our device in the middle of rx
> > queue setup?
> 
> The reason is the old Rx offloads API is configured on device configure. 
> This if section is for applications which already moved to the new offload
> API however the underlying PMD still uses the old one.

Isn't it risky to re-run configure here?
We could also declare this case as an error.

I think applications which have migrated to the new API
could use the convert functions themselves before calling configure,
to support PMDs that have not migrated yet.
The cons of my solution are:
- discourage apps to migrate before all PMDs have migrated
- expose a temporary function to convert API
I propose it anyway because there is always someone who likes bad ideas ;)
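
For illustration, a rough sketch of this alternative from the application side;
both the capability query and the exported conversion helper below are
hypothetical (the rte_eth_copy_rxq_offloads() in this RFC is a static,
internal function):

/* Hypothetical application-side branch: convert the per-queue offloads into
 * the old rxmode bit-fields before configure when the PMD has not migrated.
 */
static int
configure_port(uint8_t port_id, uint16_t nb_rxq, uint16_t nb_txq,
	       struct rte_eth_rxq_conf *rxq_conf)
{
	struct rte_eth_conf conf = { .rxmode = { .ignore = 1 } };

	if (!rte_eth_dev_supports_rxq_offloads(port_id)) {	/* hypothetical */
		/* hypothetical exported variant of rte_eth_copy_rxq_offloads() */
		rte_eth_convert_rxq_offloads(&conf.rxmode, rxq_conf);
		conf.rxmode.ignore = 0;
	}
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}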
  
Shahaf Shuler Aug. 24, 2017, 7:12 a.m. UTC | #4
Thursday, August 24, 2017 1:06 AM, Thomas Monjalon:
> 23/08/2017 15:13, Shahaf Shuler:
> > Wednesday, August 23, 2017 3:29 PM, Ananyev, Konstantin:
> > > From: Shahaf Shuler
> > > > In order to enable PMDs to support only one of the APIs, and
> > > > applications to avoid branching according to the underlying device
> > > > a copy functions to/from the old/new APIs were added.
> 
> Looks a good intent.
> I would prefer the word "convert" instead of "copy".
> 
> > > >  int
> > > >  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
> [...]
> > > > +	} else if ((!(dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD))
> &&
> > > > +		   (dev->data->dev_conf.rxmode.ignore == 1)) {
> > > > +		int ret;
> > > > +		struct rte_eth_rxmode rxmode;
> > > > +
> > > > +		rte_eth_copy_rxq_offloads(&rxmode, rx_conf);
> > > > +		if (memcmp(&rxmode, &dev->data->dev_conf.rxmode,
> > > > +			   sizeof(rxmode))) {
> > > > +			/*
> > > > +			 * device which work with rxmode offloads API
> requires
> > > > +			 * a re-configuration in order to apply the new
> offloads
> > > > +			 * configuration.
> > > > +			 */
> > > > +			dev->data->dev_conf.rxmode = rxmode;
> > > > +			ret = rte_eth_dev_configure(port_id,
> > > > +					dev->data->nb_rx_queues,
> > > > +					dev->data->nb_tx_queues,
> > > > +					&dev->data->dev_conf);
> > >
> > > Hmm, and why we would need to reconfigure our device in the middle
> > > of rx queue setup?
> >
> > The reason is the old Rx offloads API is configured on device configure.
> > This if section is for applications which already moved to the new
> > offload API however the underlying PMD still uses the old one.
> 
> Isn't it risky to re-run configure here?
> We could also declare this case as an error.
> 
> I think applications which have migrated to the new API, could use the
> convert functions themselves before calling configure to support not
> migrated PMDs.
> The cons of my solution are:
> - discourage apps to migrate before all PMDs have migrated
> - expose a temporary function to convert API I propose it anyway because
> there is always someone to like bad ideas ;)

Yes. I tried to make it as simple as possible for applications to move to the new API.
Defining it as an error flow will force the application to check the PMD offload mode and branch accordingly. The conversion functions are good helpers, yet the code remains complex due to the different cases with the different PMDs.

Considering the re-configuration is risky, and without other ideas I will need to fall back to the error flow case.
Are we OK with that?
  
Thomas Monjalon Aug. 25, 2017, 1:26 p.m. UTC | #5
24/08/2017 09:12, Shahaf Shuler:
> Thursday, August 24, 2017 1:06 AM, Thomas Monjalon:
> > 23/08/2017 15:13, Shahaf Shuler:
> > > Wednesday, August 23, 2017 3:29 PM, Ananyev, Konstantin:
> > > > From: Shahaf Shuler
> > > > > In order to enable PMDs to support only one of the APIs, and
> > > > > applications to avoid branching according to the underlying device
> > > > > a copy functions to/from the old/new APIs were added.
> > 
> > Looks a good intent.
> > I would prefer the word "convert" instead of "copy".
> > 
> > > > >  int
> > > > >  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
> > [...]
> > > > > +	} else if ((!(dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD)) &&
> > > > > +		   (dev->data->dev_conf.rxmode.ignore == 1)) {
> > > > > +		int ret;
> > > > > +		struct rte_eth_rxmode rxmode;
> > > > > +
> > > > > +		rte_eth_copy_rxq_offloads(&rxmode, rx_conf);
> > > > > +		if (memcmp(&rxmode, &dev->data->dev_conf.rxmode,
> > > > > +			   sizeof(rxmode))) {
> > > > > +			/*
> > > > > +			 * device which work with rxmode offloads API requires
> > > > > +			 * a re-configuration in order to apply the new offloads
> > > > > +			 * configuration.
> > > > > +			 */
> > > > > +			dev->data->dev_conf.rxmode = rxmode;
> > > > > +			ret = rte_eth_dev_configure(port_id,
> > > > > +					dev->data->nb_rx_queues,
> > > > > +					dev->data->nb_tx_queues,
> > > > > +					&dev->data->dev_conf);
> > > >
> > > > Hmm, and why we would need to reconfigure our device in the middle
> > > > of rx queue setup?
> > >
> > > The reason is the old Rx offloads API is configured on device configure.
> > > This if section is for applications which already moved to the new
> > > offload API however the underlying PMD still uses the old one.
> > 
> > Isn't it risky to re-run configure here?
> > We could also declare this case as an error.
> > 
> > I think applications which have migrated to the new API, could use the
> > convert functions themselves before calling configure to support not
> > migrated PMDs.
> > The cons of my solution are:
> > - discourage apps to migrate before all PMDs have migrated
> > - expose a temporary function to convert API I propose it anyway because
> > there is always someone to like bad ideas ;)
> 
> Yes. I tried to make it as simple as possible for application to move to the new API.
> Defining it as error flow, will enforce the application to check the PMD offload mode and branch accordingly. The conversion functions are a good helpers, yet the code remains complex due to the different cases with the different PMDs.
> 
> Considering the re-configuration is risky, and without other ideas I will need to fall back to the error flow case.
> Are we OK with that?

I think we can take the risk of keeping this call to
rte_eth_dev_configure() in the middle of rte_eth_rx_queue_setup().
In theory it should be acceptable.
If we merge it soon, it can be better tested with every driver.
  
Ananyev, Konstantin Aug. 28, 2017, 2:12 p.m. UTC | #6
> -----Original Message-----
> From: Shahaf Shuler [mailto:shahafs@mellanox.com]
> Sent: Wednesday, August 23, 2017 2:13 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org
> Subject: RE: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the new offloads API
> 
> Wednesday, August 23, 2017 3:29 PM, Ananyev, Konstantin:
> >
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Shahaf Shuler
> > > Sent: Monday, August 7, 2017 1:55 PM
> > > To: dev@dpdk.org
> > > Subject: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the
> > > new offloads API
> > >
> > > A new offloads API was introduced by commits:
> > >
> > > commit 8b07fcae6061 ("ethdev: introduce Tx queue offloads API") commit
> > > c6504557763e ("ethdev: introduce Rx queue offloads API")
> > >
> > > In order to enable PMDs to support only one of the APIs, and
> > > applications to avoid branching according to the underlying device a
> > > copy functions to/from the old/new APIs were added.
> > >
> > > Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> > > ---
> > >  lib/librte_ether/rte_ethdev.c | 140
> > > +++++++++++++++++++++++++++++++++++++
> > >  1 file changed, 140 insertions(+)
> > >
> > > diff --git a/lib/librte_ether/rte_ethdev.c
> > > b/lib/librte_ether/rte_ethdev.c index f73307e99..2b4a28c97 100644
> > > --- a/lib/librte_ether/rte_ethdev.c
> > > +++ b/lib/librte_ether/rte_ethdev.c
> > > @@ -1003,6 +1003,63 @@ rte_eth_dev_close(uint8_t port_id)
> > >  	dev->data->tx_queues = NULL;
> > >  }
> > >
> > > +/**
> > > + * A copy function from rxmode offloads API to rte_eth_rxq_conf
> > > + * offloads API, to enable PMDs to support only one of the APIs.
> > > + */
> > > +static void
> > > +rte_eth_copy_rxmode_offloads(struct rte_eth_rxmode *rxmode,
> > > +			     struct rte_eth_rxq_conf *rxq_conf) {
> > > +	if (rxmode->header_split == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
> > > +	if (rxmode->hw_ip_checksum == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
> > > +	if (rxmode->hw_vlan_filter == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
> > > +	if (rxmode->hw_vlan_strip == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> > > +	if (rxmode->hw_vlan_extend == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
> > > +	if (rxmode->jumbo_frame == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
> > > +	if (rxmode->hw_strip_crc == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
> > > +	if (rxmode->enable_scatter == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_SCATTER;
> > > +	if (rxmode->enable_lro == 1)
> > > +		rxq_conf->offloads |= DEV_RX_OFFLOAD_LRO; }
> > > +
> > > +/**
> > > + * A copy function between rte_eth_rxq_conf offloads API to rxmode
> > > + * offloads API, to enable application to be agnostic to the PMD
> > > +supported
> > > + * offload API.
> > > + */
> > > +static void
> > > +rte_eth_copy_rxq_offloads(struct rte_eth_rxmode *rxmode,
> > > +			  struct rte_eth_rxq_conf *rxq_conf) {
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
> > > +		rxmode->header_split = 1;
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
> > > +		rxmode->hw_ip_checksum = 1;
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
> > > +		rxmode->hw_vlan_filter = 1;
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
> > > +		rxmode->hw_vlan_strip = 1;
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
> > > +		rxmode->hw_vlan_extend = 1;
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
> > > +		rxmode->jumbo_frame = 1;
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
> > > +		rxmode->hw_strip_crc = 1;
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
> > > +		rxmode->enable_scatter = 1;
> > > +	if (rxq_conf->offloads & DEV_RX_OFFLOAD_LRO)
> > > +		rxmode->enable_lro = 1;
> > > +}
> > > +
> > >  int
> > >  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
> > >  		       uint16_t nb_rx_desc, unsigned int socket_id, @@ -1083,6
> > > +1140,37 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t
> > rx_queue_id,
> > >  	if (rx_conf == NULL)
> > >  		rx_conf = &dev_info.default_rxconf;
> > >
> > > +	if ((dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD) &&
> > > +	    (dev->data->dev_conf.rxmode.ignore == 0)) {
> > > +		rte_eth_copy_rxmode_offloads(&dev->data-
> > >dev_conf.rxmode,
> > > +					     rx_conf);
> > > +	} else if ((!(dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD))
> > &&
> > > +		   (dev->data->dev_conf.rxmode.ignore == 1)) {
> > > +		int ret;
> > > +		struct rte_eth_rxmode rxmode;
> > > +
> > > +		rte_eth_copy_rxq_offloads(&rxmode, rx_conf);
> > > +		if (memcmp(&rxmode, &dev->data->dev_conf.rxmode,
> > > +			   sizeof(rxmode))) {
> > > +			/*
> > > +			 * device which work with rxmode offloads API
> > requires
> > > +			 * a re-configuration in order to apply the new
> > offloads
> > > +			 * configuration.
> > > +			 */
> > > +			dev->data->dev_conf.rxmode = rxmode;
> > > +			ret = rte_eth_dev_configure(port_id,
> > > +					dev->data->nb_rx_queues,
> > > +					dev->data->nb_tx_queues,
> > > +					&dev->data->dev_conf);
> >
> >
> > Hmm, and why we would need to reconfigure our device in the middle of rx
> > queue setup?
> 
> The reason is the old Rx offloads API is configured on device configure.
> This if section is for applications which already moved to the new offload API however the underlying PMD still uses the old one.

Ok, but as I remember, right now the initialization order is pretty strict:
rx_queue_setup() always has to be called after dev_configure().
One of the reasons for that: rx_queue_setup() might change fields inside dev->data->dev_private.
A second call to dev_configure() will void these changes and some of the rxq config information will be lost.
So I think we should avoid calling dev_configure() inside rx_queue_setup().

My preference still would be to force all PMDs to move to the new version of rx_queue_setup() first.
I think it would be much less error prone than supporting two flavors of PMD config,
and it will allow us to catch errors early - in case this new scheme doesn't work for some PMD for any reason.

Also it seems that you  forgot to:
struct rte_eth_rxmode rxmode  = dev->data->dev_conf.rxmode;

Konstantin


> 
> >
> > > +			if (ret < 0) {
> > > +				RTE_PMD_DEBUG_TRACE(
> > > +					"unable to re-configure port %d "
> > > +					"in order to apply rxq offloads "
> > > +					"configuration\n", port_id);
> > > +			}
> > > +		}
> > > +	}
> > > +
> > >  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id,
> > nb_rx_desc,
> > >  					      socket_id, rx_conf, mp);
> >
> > BTW, I don't see changes in any PMD for new offload flags?
> > Is it because it is just a n RFC and full patch would contain such changes?
> 
> Yes this is because this is an RFC.
> 
> The full patch I intend will move all examples and testpmd to the new offloads API.
> In addition it will include the mlx5 PMD support for the new offloads API.
> 
> As I said on previous mail, I believe that the work to move the different PMDs to the new API should be done by their developers or
> maintainers.
> 
> >
> >
> > >  	if (!ret) {
> > > @@ -1094,6 +1182,51 @@ rte_eth_rx_queue_setup(uint8_t port_id,
> > uint16_t rx_queue_id,
> > >  	return ret;
> > >  }
> > >
> > > +/**
> > > + * A copy function from txq_flags to rte_eth_txq_conf offloads API,
> > > + * to enable PMDs to support only one of the APIs.
> > > + */
> > > +static void
> > > +rte_eth_copy_txq_flags(struct rte_eth_txq_conf *txq_conf) {
> > > +	uint32_t txq_flags = txq_conf->txq_flags
> > > +	uint64_t *offloads = &txq_conf->offloads;
> > > +
> > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
> > > +		*offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
> > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
> > > +		*offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
> > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
> > > +		*offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
> > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
> > > +		*offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
> > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
> > > +		*offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; }
> > > +
> > > +/**
> > > + * A copy function between rte_eth_txq_conf offloads API to txq_flags
> > > + * offloads API, to enable application to be agnostic to the PMD
> > > +supported
> > > + * API.
> > > + */
> > > +static void
> > > +rte_eth_copy_txq_offloads(struct rte_eth_txq_conf *txq_conf) {
> > > +	uint32_t *txq_flags = &txq_conf->txq_flags
> > > +	uint64_t offloads = txq_conf->offloads;
> > > +
> > > +	if (!(offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
> > > +		*txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
> > > +	if (!(offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
> > > +		*txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
> > > +	if (!(offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
> > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
> > > +	if (!(offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
> > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
> > > +	if (!(offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
> > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMTCP; }
> > > +
> > >  int
> > >  rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
> > >  		       uint16_t nb_tx_desc, unsigned int socket_id, @@ -1145,6
> > > +1278,13 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t
> > tx_queue_id,
> > >  	if (tx_conf == NULL)
> > >  		tx_conf = &dev_info.default_txconf;
> > >
> > > +	if ((dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> > > +	    (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)))
> > > +		rte_eth_copy_txq_flags(tx_conf);
> > > +	else if (!(dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> > > +		   (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE))
> > > +		rte_eth_copy_txq_offloads(tx_conf);
> > > +
> >
> > As I said in my previous mail - I think better to always convrert from old
> > txq_flags to new TX offload flags and make each PMD to understand new
> > offload values only.
> > Konstantin
> >
> > >  	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id,
> > nb_tx_desc,
> > >  					       socket_id, tx_conf);
> > >  }
> > > --
> > > 2.12.0
  
Shahaf Shuler Aug. 29, 2017, 6:26 a.m. UTC | #7
Monday, August 28, 2017 5:12 PM, Ananyev, Konstantin:
> > Wednesday, August 23, 2017 3:29 PM, Ananyev, Konstantin:
> > >
> > > Hmm, and why we would need to reconfigure our device in the middle
> > > of rx queue setup?
> >
> > The reason is the old Rx offloads API is configured on device configure.
> > This if section is for applications which already moved to the new offload
> API however the underlying PMD still uses the old one.
> 
> Ok, but as I remember, right now the initialization order is pretty strict:
> rx_queue_setup() has to be always called after dev_configure().

Is this restriction backed up in the API definition?
From what I saw in the dev_configure API documentation:

" 
This function must be invoked first before any other function in the Ethernet API. This function can also be re-invoked when a device is in the stopped state.
"

It does not dictate that one is forbidden to do the following (while the port is stopped):
dev_configure
rx_queue_setup(queue 1)
dev _configure (for the same configuration)
rx_queue_setup(queue 2)

I didn't see any wording about it in the rx_queue_setup API either. Maybe the API doc should be updated.

You are correct, though, that all examples and apps in the dpdk tree behave in the way you described.
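
For reference, the order every in-tree application follows looks roughly like
this (configure once, set up all queues, then start); names such as port_conf,
rxq_conf and txq_conf are placeholders:

	uint16_t q;

	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
	for (q = 0; q < nb_rxq; q++)
		rte_eth_rx_queue_setup(port_id, q, nb_rxd, socket_id,
				       &rxq_conf, mb_pool);
	for (q = 0; q < nb_txq; q++)
		rte_eth_tx_queue_setup(port_id, q, nb_txd, socket_id, &txq_conf);
	rte_eth_dev_start(port_id);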

> One of the reasons for that: rx_queue_setup() might change fileds inside
> dev->data->dev_private.
> Second call for dev_configure() will void these changes and some of rxq
> config information will be lost.
> So I think we should avoid calling dev_configure() inside rx_queue_setup().

Continuing the above comment, is this reason because of a wrong implementation of the callbacks by the PMDs or an actual limitation of ethdev?

> 
> My preference still would be to force all PMDs to move to the new version of
> rx_queue_setup() first.
> I think it would be much more error prone then supporting two flavors of
> PMD config
> and will allow us to catch errors early - in case this new scheme doesn't work
> by some PMD for any reason.

I fully agree with you. If we can force all PMDs to move to the new API, that will be best.
No need for a risky re-configuration in the middle of the queue setup.
But how can we force them all to do such a transition? I saw there is a discussion on this RFC in the techboard - maybe it can be decided there.

As I said before, I think it will be very hard to migrate all PMDs to the new API by myself.
It is not a simple function prototype change or some identical swap I need to do on each PMD.
Each device has its own characteristics and restrictions on the offloads it can provide.
Some offloads can be enabled only at specific places in the device initialization.
Some offloads are only per port.
To do such a transition properly I must fully understand the underlying device of each PMD.

I can commit to the transition for the devices I am familiar with: mlx4 and mlx5.

> 
> Also it seems that you  forgot to:
> struct rte_eth_rxmode rxmode  = dev->data->dev_conf.rxmode;

Thanks, discovered it right after I sent the RFC. Already fixed. 

> 
> Konstantin
> 
> 
> >
> > >
> > > > +			if (ret < 0) {
> > > > +				RTE_PMD_DEBUG_TRACE(
> > > > +					"unable to re-configure port %d "
> > > > +					"in order to apply rxq offloads "
> > > > +					"configuration\n", port_id);
> > > > +			}
> > > > +		}
> > > > +	}
> > > > +
> > > >  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id,
> > > nb_rx_desc,
> > > >  					      socket_id, rx_conf, mp);
> > >
> > > BTW, I don't see changes in any PMD for new offload flags?
> > > Is it because it is just a n RFC and full patch would contain such changes?
> >
> > Yes this is because this is an RFC.
> >
> > The full patch I intend will move all examples and testpmd to the new
> offloads API.
> > In addition it will include the mlx5 PMD support for the new offloads API.
> >
> > As I said on previous mail, I believe that the work to move the
> > different PMDs to the new API should be done by their developers or
> maintainers.
> >
> > >
> > >
> > > >  	if (!ret) {
> > > > @@ -1094,6 +1182,51 @@ rte_eth_rx_queue_setup(uint8_t port_id,
> > > uint16_t rx_queue_id,
> > > >  	return ret;
> > > >  }
> > > >
> > > > +/**
> > > > + * A copy function from txq_flags to rte_eth_txq_conf offloads
> > > > +API,
> > > > + * to enable PMDs to support only one of the APIs.
> > > > + */
> > > > +static void
> > > > +rte_eth_copy_txq_flags(struct rte_eth_txq_conf *txq_conf) {
> > > > +	uint32_t txq_flags = txq_conf->txq_flags
> > > > +	uint64_t *offloads = &txq_conf->offloads;
> > > > +
> > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
> > > > +		*offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
> > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
> > > > +		*offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
> > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
> > > > +		*offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
> > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
> > > > +		*offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
> > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
> > > > +		*offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; }
> > > > +
> > > > +/**
> > > > + * A copy function between rte_eth_txq_conf offloads API to
> > > > +txq_flags
> > > > + * offloads API, to enable application to be agnostic to the PMD
> > > > +supported
> > > > + * API.
> > > > + */
> > > > +static void
> > > > +rte_eth_copy_txq_offloads(struct rte_eth_txq_conf *txq_conf) {
> > > > +	uint32_t *txq_flags = &txq_conf->txq_flags
> > > > +	uint64_t offloads = txq_conf->offloads;
> > > > +
> > > > +	if (!(offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
> > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
> > > > +	if (!(offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
> > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
> > > > +	if (!(offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
> > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
> > > > +	if (!(offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
> > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
> > > > +	if (!(offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
> > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMTCP; }
> > > > +
> > > >  int
> > > >  rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
> > > >  		       uint16_t nb_tx_desc, unsigned int socket_id, @@ -1145,6
> > > > +1278,13 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t
> > > tx_queue_id,
> > > >  	if (tx_conf == NULL)
> > > >  		tx_conf = &dev_info.default_txconf;
> > > >
> > > > +	if ((dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> > > > +	    (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)))
> > > > +		rte_eth_copy_txq_flags(tx_conf);
> > > > +	else if (!(dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> > > > +		   (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE))
> > > > +		rte_eth_copy_txq_offloads(tx_conf);
> > > > +
> > >
> > > As I said in my previous mail - I think better to always convrert
> > > from old txq_flags to new TX offload flags and make each PMD to
> > > understand new offload values only.
> > > Konstantin
> > >
> > > >  	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id,
> > > nb_tx_desc,
> > > >  					       socket_id, tx_conf);
> > > >  }
> > > > --
> > > > 2.12.0
  
Ananyev, Konstantin Aug. 29, 2017, 9:43 a.m. UTC | #8
> -----Original Message-----
> From: Shahaf Shuler [mailto:shahafs@mellanox.com]
> Sent: Tuesday, August 29, 2017 7:27 AM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org
> Subject: RE: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the new offloads API
> 
> Monday, August 28, 2017 5:12 PM, Ananyev, Konstantin:
> > > Wednesday, August 23, 2017 3:29 PM, Ananyev, Konstantin:
> > > >
> > > > Hmm, and why we would need to reconfigure our device in the middle
> > > > of rx queue setup?
> > >
> > > The reason is the old Rx offloads API is configured on device configure.
> > > This if section is for applications which already moved to the new offload
> > API however the underlying PMD still uses the old one.
> >
> > Ok, but as I remember, right now the initialization order is pretty strict:
> > rx_queue_setup() has to be always called after dev_configure().
> 
> Is this restriction backed up in the API definition?
> From what I saw in  dev_configure API documentation:
> 
> "
> This function must be invoked first before any other function in the Ethernet API. This function can also be re-invoked when a device is in
> the stopped state.
> "
> 
> It does not dictate one is forbidden to do (while port is stopped):
> dev_configure
> rx_queue_setup(queue 1)
> dev _configure (for the same configuration)
> rx_queue_setup(queue 2)
> 

It might work in some cases, but there is no guarantee it would work in general.
Though, as I understand, in your case the config parameters for the second call of
dev_configure() might be different anyway.

> didn't saw any words on in it also in rx_queue_setup API. maybe API doc should be updated.

Yes, good point.

> 
> You are correct though that all examples and apps on dpdk tree behaves in the way you described.
> 
> > One of the reasons for that: rx_queue_setup() might change fileds inside
> > dev->data->dev_private.
> > Second call for dev_configure() will void these changes and some of rxq
> > config information will be lost.
> > So I think we should avoid calling dev_configure() inside rx_queue_setup().
> 
> In continue to above comment, is this reason is because of a wrong implementation of the callbacks by the PMDs or actual limitation from
> ethdev?

I'd say it is a combination of limitations of the ethdev design and actual HW restrictions.
If we had an rx/tx function per queue, some of these limitations might be removed, I think.
Though for some HW, offloads can be enabled/disabled per device, not per queue,
so I guess some limitations would persist anyway.

> 
> >
> > My preference still would be to force all PMDs to move to the new version of
> > rx_queue_setup() first.
> > I think it would be much more error prone then supporting two flavors of
> > PMD config
> > and will allow us to catch errors early - in case this new scheme doesn't work
> > by some PMD for any reason.
> 
> I Fully agree with you. If we can force all PMDs to move the new API it will be the best.
> No need in risky re-configuration in the middle of the queue setup.
> But how can we force all to do such transition? I saw there is a discussion on this RFC on the techboard - maybe it can be decided there.
> 
> As I said before, I think I will be very hard to migrate all PMDs to the new API by myself.
> It is not a simple function prototype change or some identical swap I need to do on each PMD.
> Each device has its own characteristics and restriction on the offloads it can provide.
> Some offloads can be enabled only on specific places in the device initialization.
> Some offloads are only per port.
> To do such transition properly I must fully understand the underlying device of each PMD.
> 
> I can commit on transition for the devices I familiar with: Mlx4 and mlx5.

That's understandable.
For tx_prepare() work, we used the following approach:
1. submitted patch with changes in rte_ethdev and PMDs we  are familiar with (Intel ones).
    For other PMDs - patch contained just minimal changes to make it build cleanly.
2. Asked other PMD maintainers to review rte_ethdev changes and provide a proper patch
    for the PMD they own.

Probably same approach can be used here.
Konstantin

> 
> >
> > Also it seems that you  forgot to:
> > struct rte_eth_rxmode rxmode  = dev->data->dev_conf.rxmode;
> 
> Thanks, discovered it right after I sent the RFC. Already fixed.
> 
> >
> > Konstantin
> >
> >
> > >
> > > >
> > > > > +			if (ret < 0) {
> > > > > +				RTE_PMD_DEBUG_TRACE(
> > > > > +					"unable to re-configure port %d "
> > > > > +					"in order to apply rxq offloads "
> > > > > +					"configuration\n", port_id);
> > > > > +			}
> > > > > +		}
> > > > > +	}
> > > > > +
> > > > >  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id,
> > > > nb_rx_desc,
> > > > >  					      socket_id, rx_conf, mp);
> > > >
> > > > BTW, I don't see changes in any PMD for new offload flags?
> > > > Is it because it is just a n RFC and full patch would contain such changes?
> > >
> > > Yes this is because this is an RFC.
> > >
> > > The full patch I intend will move all examples and testpmd to the new
> > offloads API.
> > > In addition it will include the mlx5 PMD support for the new offloads API.
> > >
> > > As I said on previous mail, I believe that the work to move the
> > > different PMDs to the new API should be done by their developers or
> > maintainers.
> > >
> > > >
> > > >
> > > > >  	if (!ret) {
> > > > > @@ -1094,6 +1182,51 @@ rte_eth_rx_queue_setup(uint8_t port_id,
> > > > uint16_t rx_queue_id,
> > > > >  	return ret;
> > > > >  }
> > > > >
> > > > > +/**
> > > > > + * A copy function from txq_flags to rte_eth_txq_conf offloads
> > > > > +API,
> > > > > + * to enable PMDs to support only one of the APIs.
> > > > > + */
> > > > > +static void
> > > > > +rte_eth_copy_txq_flags(struct rte_eth_txq_conf *txq_conf) {
> > > > > +	uint32_t txq_flags = txq_conf->txq_flags
> > > > > +	uint64_t *offloads = &txq_conf->offloads;
> > > > > +
> > > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
> > > > > +		*offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
> > > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
> > > > > +		*offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
> > > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
> > > > > +		*offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
> > > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
> > > > > +		*offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
> > > > > +	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
> > > > > +		*offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; }
> > > > > +
> > > > > +/**
> > > > > + * A copy function between rte_eth_txq_conf offloads API to
> > > > > +txq_flags
> > > > > + * offloads API, to enable application to be agnostic to the PMD
> > > > > +supported
> > > > > + * API.
> > > > > + */
> > > > > +static void
> > > > > +rte_eth_copy_txq_offloads(struct rte_eth_txq_conf *txq_conf) {
> > > > > +	uint32_t *txq_flags = &txq_conf->txq_flags
> > > > > +	uint64_t offloads = txq_conf->offloads;
> > > > > +
> > > > > +	if (!(offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
> > > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
> > > > > +	if (!(offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
> > > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
> > > > > +	if (!(offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
> > > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
> > > > > +	if (!(offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
> > > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
> > > > > +	if (!(offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
> > > > > +		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMTCP; }
> > > > > +
> > > > >  int
> > > > >  rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
> > > > >  		       uint16_t nb_tx_desc, unsigned int socket_id, @@ -1145,6
> > > > > +1278,13 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t
> > > > tx_queue_id,
> > > > >  	if (tx_conf == NULL)
> > > > >  		tx_conf = &dev_info.default_txconf;
> > > > >
> > > > > +	if ((dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> > > > > +	    (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)))
> > > > > +		rte_eth_copy_txq_flags(tx_conf);
> > > > > +	else if (!(dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
> > > > > +		   (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE))
> > > > > +		rte_eth_copy_txq_offloads(tx_conf);
> > > > > +
> > > >
> > > > As I said in my previous mail - I think better to always convrert
> > > > from old txq_flags to new TX offload flags and make each PMD to
> > > > understand new offload values only.
> > > > Konstantin
> > > >
> > > > >  	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id,
> > > > nb_tx_desc,
> > > > >  					       socket_id, tx_conf);
> > > > >  }
> > > > > --
> > > > > 2.12.0
  
Ferruh Yigit Aug. 29, 2017, 12:55 p.m. UTC | #9
On 8/25/2017 2:26 PM, Thomas Monjalon wrote:
> 24/08/2017 09:12, Shahaf Shuler:
>> Thursday, August 24, 2017 1:06 AM, Thomas Monjalon:
>>> 23/08/2017 15:13, Shahaf Shuler:
>>>> Wednesday, August 23, 2017 3:29 PM, Ananyev, Konstantin:
>>>>> From: Shahaf Shuler
>>>>>> In order to enable PMDs to support only one of the APIs, and
>>>>>> applications to avoid branching according to the underlying device
>>>>>> a copy functions to/from the old/new APIs were added.
>>>
>>> Looks a good intent.
>>> I would prefer the word "convert" instead of "copy".
>>>
>>>>>>  int
>>>>>>  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
>>> [...]
>>>>>> +	} else if ((!(dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD)) &&
>>>>>> +		   (dev->data->dev_conf.rxmode.ignore == 1)) {
>>>>>> +		int ret;
>>>>>> +		struct rte_eth_rxmode rxmode;
>>>>>> +
>>>>>> +		rte_eth_copy_rxq_offloads(&rxmode, rx_conf);
>>>>>> +		if (memcmp(&rxmode, &dev->data->dev_conf.rxmode,
>>>>>> +			   sizeof(rxmode))) {
>>>>>> +			/*
>>>>>> +			 * device which work with rxmode offloads API requires
>>>>>> +			 * a re-configuration in order to apply the new offloads
>>>>>> +			 * configuration.
>>>>>> +			 */
>>>>>> +			dev->data->dev_conf.rxmode = rxmode;
>>>>>> +			ret = rte_eth_dev_configure(port_id,
>>>>>> +					dev->data->nb_rx_queues,
>>>>>> +					dev->data->nb_tx_queues,
>>>>>> +					&dev->data->dev_conf);
>>>>>
>>>>> Hmm, and why we would need to reconfigure our device in the middle
>>>>> of rx queue setup?
>>>>
>>>> The reason is the old Rx offloads API is configured on device configure.
>>>> This if section is for applications which already moved to the new
>>>> offload API however the underlying PMD still uses the old one.
>>>
>>> Isn't it risky to re-run configure here?
>>> We could also declare this case as an error.
>>>
>>> I think applications which have migrated to the new API, could use the
>>> convert functions themselves before calling configure to support not
>>> migrated PMDs.
>>> The cons of my solution are:
>>> - discourage apps to migrate before all PMDs have migrated
>>> - expose a temporary function to convert API I propose it anyway because
>>> there is always someone to like bad ideas ;)
>>
>> Yes. I tried to make it as simple as possible for application to move to the new API.
>> Defining it as error flow, will enforce the application to check the PMD offload mode and branch accordingly. The conversion functions are a good helpers, yet the code remains complex due to the different cases with the different PMDs.
>>
>> Considering the re-configuration is risky, and without other ideas I will need to fall back to the error flow case.
>> Are we OK with that?
> 
> I think we can take the risk of keeping this call to
> rte_eth_dev_configure() in the middle of rte_eth_rx_queue_setup().
> In theory it should be acceptable.
> If we merge it soon, it can be better tested with every drivers.

I have doubts about taking that risk. Some drivers do HW configuration via
configure(), and a combination of start/stop, setup_queue and configure can
be complex.

I am for generating an error for this case.

Generating an error can also be a good motivation for PMDs to adopt the new method.
  
Shahaf Shuler Aug. 30, 2017, 6:30 a.m. UTC | #10
Tuesday, August 29, 2017 3:55 PM, Ferruh Yigit:
> >> Considering the re-configuration is risky, and without other ideas I will
> >> need to fall back to the error flow case.
> >> Are we OK with that?
> >
> > I think we can take the risk of keeping this call to
> > rte_eth_dev_configure() in the middle of rte_eth_rx_queue_setup().
> > In theory it should be acceptable.
> > If we merge it soon, it can be better tested with every drivers.
> 
> I doubt about taking that risk. Some driver does HW configuration via
> configure() and combination of start/stop, setup_queue and configure can
> be complex.
> 
> I am for generating error for this case.
> 
> Generating error also can be good motivation for PMDs to adapt new
> method.


Adding Ananyev's suggestion from the other thread:
For tx_prepare() work, we used the following approach:
1. submitted patch with changes in rte_ethdev and PMDs we  are familiar with (Intel ones).
    For other PMDs - patch contained just minimal changes to make it build cleanly.
2. Asked other PMD maintainers to review rte_ethdev changes and provide a proper patch
    for the PMD they own.

So I am OK with both suggestions. Meaning:
1. Define the case where an application uses the new offloads API with a PMD which supports only the old one as an error.
2. Apply patches to ethdev with the above behavior.

Just to emphasize, it means that PMDs which don't move to the new API by the end of 17.11 will not be able to run with any of the examples and applications in the DPDK tree (and also with other applications which have moved to the new API), as I plan to submit patches which convert them all to the new API.

Any objection to this approach?
  
Ferruh Yigit Aug. 30, 2017, 7:50 a.m. UTC | #11
On 8/30/2017 7:30 AM, Shahaf Shuler wrote:
> Tuesday, August 29, 2017 3:55 PM, Ferruh Yigit:
>>>> Considering the re-configuration is risky, and without other ideas I will
>> need to fall back to the error flow case.
>>>> Are we OK with that?
>>>
>>> I think we can take the risk of keeping this call to
>>> rte_eth_dev_configure() in the middle of rte_eth_rx_queue_setup().
>>> In theory it should be acceptable.
>>> If we merge it soon, it can be better tested with every drivers.
>>
>> I doubt about taking that risk. Some driver does HW configuration via
>> configure() and combination of start/stop, setup_queue and configure can
>> be complex.
>>
>> I am for generating error for this case.
>>
>> Generating error also can be good motivation for PMDs to adapt new
>> method.
> 
> Adding Ananyev suggestion from other thread:
> For tx_prepare() work, we used the following approach:
> 1. submitted patch with changes in rte_ethdev and PMDs we  are familiar with (Intel ones).
>     For other PMDs - patch contained just minimal changes to make it build cleanly.
> 2. Asked other PMD maintainers to review rte_ethdev changes and provide a proper patch
>     for the PMD they own.

tx_prepare() is a little different, since it was not clear if all PMDs
needs updating that is why asked to PMD owners, and the ones requires
updating already has been updated with ethdev patch. Here we know all
PMDs need updating, and they need proper time in advance.

> 
> So I am OK with both suggestions. Meaning:
> 1. Define the case were application use the new offloads API with PMD which supports the old one as an error.
> 2. apply patches to ethdev with the above behavior.
> 
> Just to emphasize, it means that PMDs which won't move to the new API by the end of 17.11 will not be able to run with any of the examples and application on DPDK tree (and also with other applications which moved to the new API), as I plan to submit patches which convert them all to the new API.

I think it is good idea to update samples/apps to new method, but this
can be short notice for PMD owners.

Can we wait one more release to update samples/apps, to give time for
PMDs to be updated, since old applications will work with new PMDs
(thanks to your helpers), I believe this won't be a problem.
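As a minimal sketch of that compatibility path (struct and field names as in this series; port_id, the descriptor count and the mb_pool mempool are placeholders, and error checks are omitted), an unmodified application keeps configuring offloads the old way, and rte_eth_copy_rxmode_offloads() from the patch below translates them for a converted PMD:

	struct rte_eth_conf port_conf = { 0 };

	/* Old-style configuration via the rxmode bit-fields. */
	port_conf.rxmode.hw_ip_checksum = 1;
	port_conf.rxmode.hw_strip_crc = 1;

	rte_eth_dev_configure(port_id, 1, 1, &port_conf);

	/* Inside rte_eth_rx_queue_setup(), rte_eth_copy_rxmode_offloads()
	 * maps the bits above to DEV_RX_OFFLOAD_CHECKSUM and
	 * DEV_RX_OFFLOAD_CRC_STRIP in the per-queue conf before a converted
	 * PMD sees them. */
	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mb_pool);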

> 
> Any objection to this approach? 
> 
>
  
Ananyev, Konstantin Aug. 30, 2017, 10:16 a.m. UTC | #12
Hi Ferruh,

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, August 30, 2017 8:51 AM
> To: Shahaf Shuler <shahafs@mellanox.com>; Thomas Monjalon <thomas@monjalon.net>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the new offloads API
>
> On 8/30/2017 7:30 AM, Shahaf Shuler wrote:
> > Tuesday, August 29, 2017 3:55 PM, Ferruh Yigit:
> >>>> Considering the re-configuration is risky, and without other ideas I will
> >> need to fall back to the error flow case.
> >>>> Are we OK with that?
> >>>
> >>> I think we can take the risk of keeping this call to
> >>> rte_eth_dev_configure() in the middle of rte_eth_rx_queue_setup().
> >>> In theory it should be acceptable.
> >>> If we merge it soon, it can be better tested with every drivers.
> >>
> >> I doubt about taking that risk. Some driver does HW configuration via
> >> configure() and combination of start/stop, setup_queue and configure can
> >> be complex.
> >>
> >> I am for generating error for this case.
> >>
> >> Generating error also can be good motivation for PMDs to adapt new
> >> method.
> >
> > Adding Ananyev suggestion from other thread:
> > For tx_prepare() work, we used the following approach:
> > 1. submitted patch with changes in rte_ethdev and PMDs we  are familiar with (Intel ones).
> >     For other PMDs - patch contained just minimal changes to make it build cleanly.
> > 2. Asked other PMD maintainers to review rte_ethdev changes and provide a proper patch
> >     for the PMD they own.
>
> tx_prepare() is a little different, since it was not clear if all PMDs
> needs updating that is why asked to PMD owners, and the ones requires
> updating already has been updated with ethdev patch. Here we know all
> PMDs need updating, and they need proper time in advance.
>
> >
> > So I am OK with both suggestions. Meaning:
> > 1. Define the case were application use the new offloads API with PMD which supports the old one as an error.
> > 2. apply patches to ethdev with the above behavior.
> >
> > Just to emphasize, it means that PMDs which won't move to the new API by the end of 17.11 will not be able to run with any of the
> examples and application on DPDK tree (and also with other applications which moved to the new API), as I plan to submit patches which
> convert them all to the new API.
>
> I think it is good idea to update samples/apps to new method, but this
> can be short notice for PMD owners.
>
> Can we wait one more release to update samples/apps, to give time for
> PMDs to be updated, since old applications will work with new PMDs
> (thanks to your helpers), I believe this won't be a problem.

I am not sure what is your suggestion here?
Support both flavors of PMD API for 17.11? 
Konstantin

>
> >
> > Any objection to this approach?
> >
> >
  
Ferruh Yigit Aug. 30, 2017, 12:42 p.m. UTC | #13
On 8/30/2017 11:16 AM, Ananyev, Konstantin wrote:
> Hi Ferruh,
> 
>> -----Original Message-----
>> From: Yigit, Ferruh
>> Sent: Wednesday, August 30, 2017 8:51 AM
>> To: Shahaf Shuler <shahafs@mellanox.com>; Thomas Monjalon <thomas@monjalon.net>; Ananyev, Konstantin
>> <konstantin.ananyev@intel.com>
>> Cc: dev@dpdk.org
>> Subject: Re: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the new offloads API
>>
>> On 8/30/2017 7:30 AM, Shahaf Shuler wrote:
>>> Tuesday, August 29, 2017 3:55 PM, Ferruh Yigit:
>>>>>> Considering the re-configuration is risky, and without other ideas I will
>>>> need to fall back to the error flow case.
>>>>>> Are we OK with that?
>>>>>
>>>>> I think we can take the risk of keeping this call to
>>>>> rte_eth_dev_configure() in the middle of rte_eth_rx_queue_setup().
>>>>> In theory it should be acceptable.
>>>>> If we merge it soon, it can be better tested with every drivers.
>>>>
>>>> I doubt about taking that risk. Some driver does HW configuration via
>>>> configure() and combination of start/stop, setup_queue and configure can
>>>> be complex.
>>>>
>>>> I am for generating error for this case.
>>>>
>>>> Generating error also can be good motivation for PMDs to adapt new
>>>> method.
>>>
>>> Adding Ananyev suggestion from other thread:
>>> For tx_prepare() work, we used the following approach:
>>> 1. submitted patch with changes in rte_ethdev and PMDs we  are familiar with (Intel ones).
>>>     For other PMDs - patch contained just minimal changes to make it build cleanly.
>>> 2. Asked other PMD maintainers to review rte_ethdev changes and provide a proper patch
>>>     for the PMD they own.
>>
>> tx_prepare() is a little different, since it was not clear if all PMDs
>> needs updating that is why asked to PMD owners, and the ones requires
>> updating already has been updated with ethdev patch. Here we know all
>> PMDs need updating, and they need proper time in advance.
>>
>>>
>>> So I am OK with both suggestions. Meaning:
>>> 1. Define the case were application use the new offloads API with PMD which supports the old one as an error.
>>> 2. apply patches to ethdev with the above behavior.
>>>
>>> Just to emphasize, it means that PMDs which won't move to the new API by the end of 17.11 will not be able to run with any of the
>> examples and application on DPDK tree (and also with other applications which moved to the new API), as I plan to submit patches which
>> convert them all to the new API.
>>
>> I think it is good idea to update samples/apps to new method, but this
>> can be short notice for PMD owners.
>>
>> Can we wait one more release to update samples/apps, to give time for
>> PMDs to be updated, since old applications will work with new PMDs
>> (thanks to your helpers), I believe this won't be a problem.
> 
> I am not sure what is your suggestion here?
> Support both flavors of PMD API for 17.11? 

Support both with an exception, when application uses new method but PMD
uses old one, throw an error (because of above discussed technical issue).

This lets existing applications run without problem, and pushes PMDs to
adapt new method.

Your suggestion to have only new method and convert all PMDs to new
method is good, but I am not sure if this is realistic for this release,
since different PMDs have different pace for updates.

ethdev updates can be done in this release, with the PMDs that already
changed to new method. The sample/app modifications Shahaf mentioned can
be done in the beginning of the next release, and this gives time to
remaining PMDs until end of next release. What do you think?

> Konstantin
> 
>>
>>>
>>> Any objection to this approach?
>>>
>>>
>
  
Thomas Monjalon Aug. 30, 2017, 1:25 p.m. UTC | #14
30/08/2017 14:42, Ferruh Yigit:
> On 8/30/2017 11:16 AM, Ananyev, Konstantin wrote:
> > Hi Ferruh,
> > From: Yigit, Ferruh
> >> On 8/30/2017 7:30 AM, Shahaf Shuler wrote:
> >>> Tuesday, August 29, 2017 3:55 PM, Ferruh Yigit:
> >>>>>> Considering the re-configuration is risky, and without other ideas I will
> >>>> need to fall back to the error flow case.
> >>>>>> Are we OK with that?
> >>>>>
> >>>>> I think we can take the risk of keeping this call to
> >>>>> rte_eth_dev_configure() in the middle of rte_eth_rx_queue_setup().
> >>>>> In theory it should be acceptable.
> >>>>> If we merge it soon, it can be better tested with every drivers.
> >>>>
> >>>> I doubt about taking that risk. Some driver does HW configuration via
> >>>> configure() and combination of start/stop, setup_queue and configure can
> >>>> be complex.
> >>>>
> >>>> I am for generating error for this case.
> >>>>
> >>>> Generating error also can be good motivation for PMDs to adapt new
> >>>> method.
> >>>
> >>> Adding Ananyev suggestion from other thread:
> >>> For tx_prepare() work, we used the following approach:
> >>> 1. submitted patch with changes in rte_ethdev and PMDs we  are familiar with (Intel ones).
> >>>     For other PMDs - patch contained just minimal changes to make it build cleanly.
> >>> 2. Asked other PMD maintainers to review rte_ethdev changes and provide a proper patch
> >>>     for the PMD they own.
> >>
> >> tx_prepare() is a little different, since it was not clear if all PMDs
> >> needs updating that is why asked to PMD owners, and the ones requires
> >> updating already has been updated with ethdev patch. Here we know all
> >> PMDs need updating, and they need proper time in advance.
> >>
> >>>
> >>> So I am OK with both suggestions. Meaning:
> >>> 1. Define the case were application use the new offloads API with PMD which supports the old one as an error.
> >>> 2. apply patches to ethdev with the above behavior.
> >>>
> >>> Just to emphasize, it means that PMDs which won't move to the new API by the end of 17.11 will not be able to run with any of the
> >> examples and application on DPDK tree (and also with other applications which moved to the new API), as I plan to submit patches which
> >> convert them all to the new API.
> >>
> >> I think it is good idea to update samples/apps to new method, but this
> >> can be short notice for PMD owners.
> >>
> >> Can we wait one more release to update samples/apps, to give time for
> >> PMDs to be updated, since old applications will work with new PMDs
> >> (thanks to your helpers), I believe this won't be a problem.
> > 
> > I am not sure what is your suggestion here?
> > Support both flavors of PMD API for 17.11? 
> 
> Support both with an exception, when application uses new method but PMD
> uses old one, throw an error (because of above discussed technical issue).
> 
> This lets existing applications run without problem, and pushes PMDs to
> adapt new method.
> 
> Your suggestion to have only new method and convert all PMDs to new
> method is good, but I am not sure if this is realistic for this release,
> since different PMDs have different pace for updates.
> 
> ethdev updates can be done in this release, with the PMDs that already
> changed to new method. The sample/app modifications Shahaf mentioned can
> be done in the beginning of the next release, and this gives time to
> remaining PMDs until end of next release. What do you think?

I think it is a reasonable migration path.
We will need you, Ferruh, to make sure all drivers are converted soon,
hopefully in 18.02.
  
Ananyev, Konstantin Aug. 30, 2017, 2:15 p.m. UTC | #15
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, August 30, 2017 1:43 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Shahaf Shuler <shahafs@mellanox.com>; Thomas Monjalon
> <thomas@monjalon.net>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the new offloads API
>
> On 8/30/2017 11:16 AM, Ananyev, Konstantin wrote:
> > Hi Ferruh,
> >
> >> -----Original Message-----
> >> From: Yigit, Ferruh
> >> Sent: Wednesday, August 30, 2017 8:51 AM
> >> To: Shahaf Shuler <shahafs@mellanox.com>; Thomas Monjalon <thomas@monjalon.net>; Ananyev, Konstantin
> >> <konstantin.ananyev@intel.com>
> >> Cc: dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [RFC PATCH 4/4] ethdev: add helpers to move to the new offloads API
> >>
> >> On 8/30/2017 7:30 AM, Shahaf Shuler wrote:
> >>> Tuesday, August 29, 2017 3:55 PM, Ferruh Yigit:
> >>>>>> Considering the re-configuration is risky, and without other ideas I will
> >>>> need to fall back to the error flow case.
> >>>>>> Are we OK with that?
> >>>>>
> >>>>> I think we can take the risk of keeping this call to
> >>>>> rte_eth_dev_configure() in the middle of rte_eth_rx_queue_setup().
> >>>>> In theory it should be acceptable.
> >>>>> If we merge it soon, it can be better tested with every drivers.
> >>>>
> >>>> I doubt about taking that risk. Some driver does HW configuration via
> >>>> configure() and combination of start/stop, setup_queue and configure can
> >>>> be complex.
> >>>>
> >>>> I am for generating error for this case.
> >>>>
> >>>> Generating error also can be good motivation for PMDs to adapt new
> >>>> method.
> >>>
> >>> Adding Ananyev suggestion from other thread:
> >>> For tx_prepare() work, we used the following approach:
> >>> 1. submitted patch with changes in rte_ethdev and PMDs we  are familiar with (Intel ones).
> >>>     For other PMDs - patch contained just minimal changes to make it build cleanly.
> >>> 2. Asked other PMD maintainers to review rte_ethdev changes and provide a proper patch
> >>>     for the PMD they own.
> >>
> >> tx_prepare() is a little different, since it was not clear if all PMDs
> >> needs updating that is why asked to PMD owners, and the ones requires
> >> updating already has been updated with ethdev patch. Here we know all
> >> PMDs need updating, and they need proper time in advance.
> >>
> >>>
> >>> So I am OK with both suggestions. Meaning:
> >>> 1. Define the case were application use the new offloads API with PMD which supports the old one as an error.
> >>> 2. apply patches to ethdev with the above behavior.
> >>>
> >>> Just to emphasize, it means that PMDs which won't move to the new API by the end of 17.11 will not be able to run with any of the
> >> examples and application on DPDK tree (and also with other applications which moved to the new API), as I plan to submit patches which
> >> convert them all to the new API.
> >>
> >> I think it is good idea to update samples/apps to new method, but this
> >> can be short notice for PMD owners.
> >>
> >> Can we wait one more release to update samples/apps, to give time for
> >> PMDs to be updated, since old applications will work with new PMDs
> >> (thanks to your helpers), I believe this won't be a problem.
> >
> > I am not sure what is your suggestion here?
> > Support both flavors of PMD API for 17.11?
>
> Support both with an exception, when application uses new method but PMD
> uses old one, throw an error (because of above discussed technical issue).
>
> This lets existing applications run without problem, and pushes PMDs to
> adapt new method.
>
> Your suggestion to have only new method and convert all PMDs to new
> method is good, but I am not sure if this is realistic for this release,
> since different PMDs have different pace for updates.
>
> ethdev updates can be done in this release, with the PMDs that already
> changed to new method. The sample/app modifications Shahaf mentioned can
> be done in the beginning of the next release, and this gives time to
> remaining PMDs until end of next release. What do you think?

If it is just a timing concern, can we ask the PMD maintainers, say within a week, at least to:
- review the proposed new API and object if they have any concerns;
- provide an estimate of how long it would take them to adopt the new one?
Then we could make a final decision: do things in one go, or prolong the pain for two releases.
Konstantin

>
> > Konstantin
> >
> >>
> >>>
> >>> Any objection to this approach?
> >>>
> >>>
> >
  

Patch

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f73307e99..2b4a28c97 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1003,6 +1003,63 @@  rte_eth_dev_close(uint8_t port_id)
 	dev->data->tx_queues = NULL;
 }
 
+/**
+ * A copy function from rxmode offloads API to rte_eth_rxq_conf
+ * offloads API, to enable PMDs to support only one of the APIs.
+ */
+static void
+rte_eth_copy_rxmode_offloads(struct rte_eth_rxmode *rxmode,
+			     struct rte_eth_rxq_conf *rxq_conf)
+{
+	if (rxmode->header_split == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
+	if (rxmode->hw_ip_checksum == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+	if (rxmode->hw_vlan_filter == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	if (rxmode->hw_vlan_strip == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (rxmode->hw_vlan_extend == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+	if (rxmode->jumbo_frame == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	if (rxmode->hw_strip_crc == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+	if (rxmode->enable_scatter == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_SCATTER;
+	if (rxmode->enable_lro == 1)
+		rxq_conf->offloads |= DEV_RX_OFFLOAD_LRO;
+}
+
+/**
+ * A copy function between rte_eth_rxq_conf offloads API to rxmode
+ * offloads API, to enable application to be agnostic to the PMD supported
+ * offload API.
+ */
+static void
+rte_eth_copy_rxq_offloads(struct rte_eth_rxmode *rxmode,
+			  struct rte_eth_rxq_conf *rxq_conf)
+{
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+		rxmode->header_split = 1;
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		rxmode->hw_ip_checksum = 1;
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+		rxmode->hw_vlan_filter = 1;
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		rxmode->hw_vlan_strip = 1;
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+		rxmode->hw_vlan_extend = 1;
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+		rxmode->jumbo_frame = 1;
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
+		rxmode->hw_strip_crc = 1;
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+		rxmode->enable_scatter = 1;
+	if (rxq_conf->offloads & DEV_RX_OFFLOAD_LRO)
+		rxmode->enable_lro = 1;
+}
+
 int
 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
 		       uint16_t nb_rx_desc, unsigned int socket_id,
@@ -1083,6 +1140,37 @@  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
 	if (rx_conf == NULL)
 		rx_conf = &dev_info.default_rxconf;
 
+	if ((dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD) &&
+	    (dev->data->dev_conf.rxmode.ignore == 0)) {
+		rte_eth_copy_rxmode_offloads(&dev->data->dev_conf.rxmode,
+					     rx_conf);
+	} else if ((!(dev->data->dev_flags & RTE_ETH_DEV_RXQ_OFFLOAD)) &&
+		   (dev->data->dev_conf.rxmode.ignore == 1)) {
+		int ret;
+		struct rte_eth_rxmode rxmode = dev->data->dev_conf.rxmode;
+
+		rte_eth_copy_rxq_offloads(&rxmode, rx_conf);
+		if (memcmp(&rxmode, &dev->data->dev_conf.rxmode,
+			   sizeof(rxmode))) {
+			/*
+			 * device which work with rxmode offloads API requires
+			 * a re-configuration in order to apply the new offloads
+			 * configuration.
+			 */
+			dev->data->dev_conf.rxmode = rxmode;
+			ret = rte_eth_dev_configure(port_id,
+					dev->data->nb_rx_queues,
+					dev->data->nb_tx_queues,
+					&dev->data->dev_conf);
+			if (ret < 0) {
+				RTE_PMD_DEBUG_TRACE(
+					"unable to re-configure port %d "
+					"in order to apply rxq offloads "
+					"configuration\n", port_id);
+			}
+		}
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, rx_conf, mp);
 	if (!ret) {
@@ -1094,6 +1182,51 @@  rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
 	return ret;
 }
 
+/**
+ * A copy function from txq_flags to rte_eth_txq_conf offloads API,
+ * to enable PMDs to support only one of the APIs.
+ */
+static void
+rte_eth_copy_txq_flags(struct rte_eth_txq_conf *txq_conf)
+{
+	uint32_t txq_flags = txq_conf->txq_flags;
+	uint64_t *offloads = &txq_conf->offloads;
+
+	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+		*offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
+		*offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
+		*offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
+		*offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
+		*offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+}
+
+/**
+ * A copy function between rte_eth_txq_conf offloads API to txq_flags
+ * offloads API, to enable application to be agnostic to the PMD supported
+ * API.
+ */
+static void
+rte_eth_copy_txq_offloads(struct rte_eth_txq_conf *txq_conf)
+{
+	uint32_t *txq_flags = &txq_conf->txq_flags;
+	uint64_t offloads = txq_conf->offloads;
+
+	if (!(offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+		*txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+	if (!(offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
+		*txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
+	if (!(offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
+	if (!(offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
+		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
+	if (!(offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
+		*txq_flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
+}
+
 int
 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
 		       uint16_t nb_tx_desc, unsigned int socket_id,
@@ -1145,6 +1278,13 @@  rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
 	if (tx_conf == NULL)
 		tx_conf = &dev_info.default_txconf;
 
+	if ((dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
+	    (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)))
+		rte_eth_copy_txq_flags(tx_conf);
+	else if (!(dev->data->dev_flags & RTE_ETH_DEV_TXQ_OFFLOAD) &&
+		   (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE))
+		rte_eth_copy_txq_offloads(tx_conf);
+
 	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
 					       socket_id, tx_conf);
 }
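
For completeness, a minimal application-side sketch of the new Tx path bridged above, assuming the rte_eth_txq_conf structure, its offloads field and ETH_TXQ_FLAGS_IGNORE as proposed in this series; dev_info (from rte_eth_dev_info_get()), port_id and the descriptor count are placeholders:

	struct rte_eth_txq_conf txq_conf = dev_info.default_txconf;

	/* Opt into the new per-queue offloads API and request two offloads. */
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txq_conf.offloads = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_TCP_CKSUM;

	/* On a PMD that still uses txq_flags, rte_eth_copy_txq_offloads()
	 * above derives the equivalent txq_flags from these offload bits
	 * before the driver's tx_queue_setup callback runs. */
	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txq_conf);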