[dpdk-dev,v2,3/4] net/ixgbe: convert to new Rx offloads API
Checks
Commit Message
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
Signed-off-by: Wei Dai <wei.dai@intel.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 93 +++++++++--------
drivers/net/ixgbe/ixgbe_ipsec.c | 8 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 163 ++++++++++++++++++++++++++----
drivers/net/ixgbe/ixgbe_rxtx.h | 3 +
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2 +-
6 files changed, 205 insertions(+), 66 deletions(-)
Comments
Hi Wei,
> -----Original Message-----
> From: Dai, Wei
> Sent: Wednesday, March 7, 2018 1:06 PM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> Subject: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
>
> Ethdev Rx offloads API has changed since:
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
> This commit support the new Rx offloads API.
>
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> ---
> drivers/net/ixgbe/ixgbe_ethdev.c | 93 +++++++++--------
> drivers/net/ixgbe/ixgbe_ipsec.c | 8 +-
> drivers/net/ixgbe/ixgbe_rxtx.c | 163 ++++++++++++++++++++++++++----
> drivers/net/ixgbe/ixgbe_rxtx.h | 3 +
> drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 2 +-
> drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2 +-
> 6 files changed, 205 insertions(+), 66 deletions(-)
>
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 8bb67ba..9437f05 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
> static int
> ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
> {
> + struct rte_eth_rxmode *rxmode;
> + rxmode = &dev->data->dev_conf.rxmode;
> +
> if (mask & ETH_VLAN_STRIP_MASK) {
> ixgbe_vlan_hw_strip_config(dev);
> }
>
> if (mask & ETH_VLAN_FILTER_MASK) {
> - if (dev->data->dev_conf.rxmode.hw_vlan_filter)
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
> ixgbe_vlan_hw_filter_enable(dev);
> else
> ixgbe_vlan_hw_filter_disable(dev);
> }
>
> if (mask & ETH_VLAN_EXTEND_MASK) {
> - if (dev->data->dev_conf.rxmode.hw_vlan_extend)
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
> ixgbe_vlan_hw_extend_enable(dev);
> else
> ixgbe_vlan_hw_extend_disable(dev);
> @@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
> IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
> struct ixgbe_adapter *adapter =
> (struct ixgbe_adapter *)dev->data->dev_private;
> + struct rte_eth_dev_info dev_info;
> + uint64_t rx_offloads;
> int ret;
>
> PMD_INIT_FUNC_TRACE();
> @@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
> return ret;
> }
>
> + ixgbe_dev_info_get(dev, &dev_info);
> + rx_offloads = dev->data->dev_conf.rxmode.offloads;
> + if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
> + PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
> + "requested 0x%" PRIx64 " supported 0x%" PRIx64,
> + rx_offloads, dev_info.rx_offload_capa);
> + return -ENOTSUP;
> + }
> +
> /* set flag to update link status after init */
> intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
>
> @@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> else
> dev_info->max_vmdq_pools = ETH_64_POOLS;
> dev_info->vmdq_queue_num = dev_info->max_rx_queues;
> - dev_info->rx_offload_capa =
> - DEV_RX_OFFLOAD_VLAN_STRIP |
> - DEV_RX_OFFLOAD_IPV4_CKSUM |
> - DEV_RX_OFFLOAD_UDP_CKSUM |
> - DEV_RX_OFFLOAD_TCP_CKSUM |
> - DEV_RX_OFFLOAD_CRC_STRIP;
> -
> - /*
> - * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
> - * mode.
> - */
> - if ((hw->mac.type == ixgbe_mac_82599EB ||
> - hw->mac.type == ixgbe_mac_X540) &&
> - !RTE_ETH_DEV_SRIOV(dev).active)
> - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
> -
> - if (hw->mac.type == ixgbe_mac_82599EB ||
> - hw->mac.type == ixgbe_mac_X540)
> - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
> -
> - if (hw->mac.type == ixgbe_mac_X550 ||
> - hw->mac.type == ixgbe_mac_X550EM_x ||
> - hw->mac.type == ixgbe_mac_X550EM_a)
> - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
> + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
> + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
> + dev_info->rx_queue_offload_capa);
>
> dev_info->tx_offload_capa =
> DEV_TX_OFFLOAD_VLAN_INSERT |
> @@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
>
> #ifdef RTE_LIBRTE_SECURITY
> - if (dev->security_ctx) {
> - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> + if (dev->security_ctx)
> dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
> - }
> #endif
>
> dev_info->default_rxconf = (struct rte_eth_rxconf) {
> @@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> },
> .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
> .rx_drop_en = 0,
> + .offloads = 0,
> };
>
> dev_info->default_txconf = (struct rte_eth_txconf) {
> @@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
> dev_info->max_vmdq_pools = ETH_16_POOLS;
> else
> dev_info->max_vmdq_pools = ETH_64_POOLS;
> - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
> - DEV_RX_OFFLOAD_IPV4_CKSUM |
> - DEV_RX_OFFLOAD_UDP_CKSUM |
> - DEV_RX_OFFLOAD_TCP_CKSUM |
> - DEV_RX_OFFLOAD_CRC_STRIP;
> + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
> + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
> + dev_info->rx_queue_offload_capa);
> dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
> DEV_TX_OFFLOAD_IPV4_CKSUM |
> DEV_TX_OFFLOAD_UDP_CKSUM |
> @@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
> },
> .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
> .rx_drop_en = 0,
> + .offloads = 0,
> };
>
> dev_info->default_txconf = (struct rte_eth_txconf) {
> @@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>
> /* switch to jumbo mode if needed */
> if (frame_size > ETHER_MAX_LEN) {
> - dev->data->dev_conf.rxmode.jumbo_frame = 1;
> + dev->data->dev_conf.rxmode.offloads |=
> + DEV_RX_OFFLOAD_JUMBO_FRAME;
> hlreg0 |= IXGBE_HLREG0_JUMBOEN;
> } else {
> - dev->data->dev_conf.rxmode.jumbo_frame = 0;
> + dev->data->dev_conf.rxmode.offloads &=
> + ~DEV_RX_OFFLOAD_JUMBO_FRAME;
> hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
> }
> IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
> @@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
> struct rte_eth_conf *conf = &dev->data->dev_conf;
> struct ixgbe_adapter *adapter =
> (struct ixgbe_adapter *)dev->data->dev_private;
> + struct rte_eth_dev_info dev_info;
> + uint64_t rx_offloads;
>
> PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
> dev->data->port_id);
>
> + ixgbevf_dev_info_get(dev, &dev_info);
> + rx_offloads = dev->data->dev_conf.rxmode.offloads;
> + if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
> + PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
> + "requested 0x%" PRIx64 " supported 0x%" PRIx64,
> + rx_offloads, dev_info.rx_offload_capa);
> + return -ENOTSUP;
> + }
> +
> /*
> * VF has no ability to enable/disable HW CRC
> * Keep the persistent behavior the same as Host PF
> */
> #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
> - if (!conf->rxmode.hw_strip_crc) {
> + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
> PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
> - conf->rxmode.hw_strip_crc = 1;
> + conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
> }
> #else
> - if (conf->rxmode.hw_strip_crc) {
> + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
> PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
> - conf->rxmode.hw_strip_crc = 0;
> + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
> }
> #endif
>
> @@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
> uint16_t queue_idx, uint16_t tx_rate)
> {
> struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct rte_eth_rxmode *rxmode;
> uint32_t rf_dec, rf_int;
> uint32_t bcnrc_val;
> uint16_t link_speed = dev->data->dev_link.link_speed;
> @@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
> bcnrc_val = 0;
> }
>
> + rxmode = &dev->data->dev_conf.rxmode;
> /*
> * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
> * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
> * set as 0x4.
> */
> - if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
> - (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
> - IXGBE_MAX_JUMBO_FRAME_SIZE))
> + if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
> + (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
> IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
> IXGBE_MMW_SIZE_JUMBO_FRAME);
> else
> @@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
> /* refuse mtu that requires the support of scattered packets when this
> * feature has not been enabled before.
> */
> - if (!rx_conf->enable_scatter &&
> + if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
> (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
> dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
> return -EINVAL;
> diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
> index 176ec0f..29e4728 100644
> --- a/drivers/net/ixgbe/ixgbe_ipsec.c
> +++ b/drivers/net/ixgbe/ixgbe_ipsec.c
> @@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
> {
> struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> uint32_t reg;
> + uint64_t rx_offloads;
>
> + rx_offloads = dev->data->dev_conf.rxmode.offloads;
> /* sanity checks */
> - if (dev->data->dev_conf.rxmode.enable_lro) {
> + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
> PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
> return -1;
> }
> - if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
> + if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
> PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
> return -1;
> }
> @@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
> reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
> IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
>
> - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
> + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
> IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
> reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
> if (reg != 0) {
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
> index 5c45eb4..a5d4822 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -2769,6 +2769,98 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
> #endif
> }
>
> +static int
> +ixgbe_is_vf(struct rte_eth_dev *dev)
> +{
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + switch (hw->mac.type) {
> + case ixgbe_mac_82599_vf:
> + case ixgbe_mac_X540_vf:
> + case ixgbe_mac_X550_vf:
> + case ixgbe_mac_X550EM_x_vf:
> + case ixgbe_mac_X550EM_a_vf:
> + return 1;
> + default:
> + return 0;
> + }
> +}
> +
> +uint64_t
> +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
> +{
> + uint64_t offloads;
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
As I can see, in ixgbe all header_split code is enabled only if RTE_HEADER_SPLIT_ENABLE is on.
It is off by default and I doubt anyone really using it these days.
So I think the best thing would be not to advertise DEV_RX_OFFLOAD_HEADER_SPLIT for ixgbe at all,
and probably remove related code.
If you'd prefer to keep it, then at least we should set that capability only
at #ifdef RTE_HEADER_SPLIT_ENABLE.
Another thing - it should be per port, not per queue.
Though I think it is better just to remove it completely.
> + if (hw->mac.type != ixgbe_mac_82598EB)
> + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +
> + return offloads;
> +}
> +
> +uint64_t
> +ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
> +{
> + uint64_t offloads;
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
> + DEV_RX_OFFLOAD_UDP_CKSUM |
> + DEV_RX_OFFLOAD_TCP_CKSUM |
> + DEV_RX_OFFLOAD_CRC_STRIP |
> + DEV_RX_OFFLOAD_JUMBO_FRAME |
> + DEV_RX_OFFLOAD_SCATTER;
> +
> + if (hw->mac.type == ixgbe_mac_82598EB)
> + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +
> + if (ixgbe_is_vf(dev) == 0)
> + offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
> + DEV_RX_OFFLOAD_VLAN_EXTEND);
> +
> + /*
> + * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
> + * mode.
> + */
> + if ((hw->mac.type == ixgbe_mac_82599EB ||
> + hw->mac.type == ixgbe_mac_X540) &&
> + !RTE_ETH_DEV_SRIOV(dev).active)
> + offloads |= DEV_RX_OFFLOAD_TCP_LRO;
> +
> + if (hw->mac.type == ixgbe_mac_82599EB ||
> + hw->mac.type == ixgbe_mac_X540)
> + offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
> +
> + if (hw->mac.type == ixgbe_mac_X550 ||
> + hw->mac.type == ixgbe_mac_X550EM_x ||
> + hw->mac.type == ixgbe_mac_X550EM_a)
> + offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
> +
> +#ifdef RTE_LIBRTE_SECURITY
I don't think you need that ifdef here.
> + if (dev->security_ctx)
> + offloads |= DEV_RX_OFFLOAD_SECURITY;
> +#endif
> +
> + return offloads;
> +}
> +
> +static int
> +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
> +{
> + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> + uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> + uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> +
> + if ((requested & (queue_supported | port_supported)) != requested)
> + return 0;
> +
> + if ((port_offloads ^ requested) & port_supported)
Could you explain a bit more what you are checking here?
As I can see:
(port_offloads ^ requested) - that's a diff between already set and newly
requested offloads.
Then you check whether that diff contains any of the per-port supported offloads,
and if yes you return an error?
Konstantin
> + return 0;
> +
> + return 1;
> +}
> +
Hi, Konstantin
Thanks for your feedback.
> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Thursday, March 15, 2018 5:48 AM
> To: Dai, Wei <wei.dai@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
>
> Hi Wei,
>
> > -----Original Message-----
> > From: Dai, Wei
> > Sent: Wednesday, March 7, 2018 1:06 PM
> > To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin
> > <konstantin.ananyev@intel.com>
> > Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> > Subject: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> >
> > Ethdev Rx offloads API has changed since:
> > commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") This
> > commit support the new Rx offloads API.
> >
> > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > ---
> > drivers/net/ixgbe/ixgbe_ethdev.c | 93 +++++++++--------
> > drivers/net/ixgbe/ixgbe_ipsec.c | 8 +-
> > drivers/net/ixgbe/ixgbe_rxtx.c | 163
> ++++++++++++++++++++++++++----
> > drivers/net/ixgbe/ixgbe_rxtx.h | 3 +
> > drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 2 +-
> > drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2 +-
> > 6 files changed, 205 insertions(+), 66 deletions(-)
> >
> > +uint64_t
> > +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev) {
> > + uint64_t offloads;
> > + struct ixgbe_hw *hw =
> > +IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +
> > + offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
>
> As I can see I ixgbe all header_split code is enabled only if
> RTE_HEADER_SPLIT_ENABLE is on.
> It is off by default and I doubt anyone really using it these days.
> So I think the best thing would be not to advertise
> DEV_RX_OFFLOAD_HEADER_SPLIT for ixgbe at all, and probably remove
> related code.
> If you'd prefer to keep it, then at least we should set that capability only at
> #ifdef RTE_HEADER_SPLIT_ENABLE.
> Another thing - it should be per port, not per queue.
> Thought I think better is just to remove it completely.
I will set this header splitting capability in #ifdef RTE_HEADER_SPLIT_ENABLE in my next patch set.
I think it is a per queue capability as it can be configured on the register IXGBE_SRRCTL of every Rx queue
In this code line: IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); in ixgbe_dev_rx_init( ).
Same case is also in the code line: IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl); in ixgbevf_dev_rx_init( ).
> > +static int
> > +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> > +requested) {
> > + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> > + uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> > + uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> > +
> > + if ((requested & (queue_supported | port_supported)) != requested)
> > + return 0;
> > +
> > + if ((port_offloads ^ requested) & port_supported)
>
> Could you explain a bit more what are you cheking here?
> As I can see:
> (port_offloads ^ requested) - that's a diff between already set and newly
> requested offloads.
> Then you check if that diff consists of supported by port offloads, and if yes
> you return an error?
> Konstantin
>
This function is similar to mlx4_check_rx_queue_offloads() in mlx4 driver.
As the git log message in the commit ce17eddefc20285bbfe575bdc07f42f0b20f34cb says,
a per-port capability should have the same setting (enabling or disabling) in both the port
configuration via rte_eth_dev_configure( ) and the queue configuration via rte_eth_rx_queue_setup( ).
This function checks whether this requirement is matched or not.
It also checks whether the offloading request is supported as a per-port or a per-queue capability.
If the above checking passes, it returns 1, else it returns 0.
Hi Wei,
>
> Hi, Konstantin
> Thanks for your feedback.
>
> > -----Original Message-----
> > From: Ananyev, Konstantin
> > Sent: Thursday, March 15, 2018 5:48 AM
> > To: Dai, Wei <wei.dai@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> > Cc: dev@dpdk.org
> > Subject: RE: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> >
> > Hi Wei,
> >
> > > -----Original Message-----
> > > From: Dai, Wei
> > > Sent: Wednesday, March 7, 2018 1:06 PM
> > > To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin
> > > <konstantin.ananyev@intel.com>
> > > Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> > > Subject: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> > >
> > > Ethdev Rx offloads API has changed since:
> > > commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") This
> > > commit support the new Rx offloads API.
> > >
> > > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > > ---
> > > drivers/net/ixgbe/ixgbe_ethdev.c | 93 +++++++++--------
> > > drivers/net/ixgbe/ixgbe_ipsec.c | 8 +-
> > > drivers/net/ixgbe/ixgbe_rxtx.c | 163
> > ++++++++++++++++++++++++++----
> > > drivers/net/ixgbe/ixgbe_rxtx.h | 3 +
> > > drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 2 +-
> > > drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2 +-
> > > 6 files changed, 205 insertions(+), 66 deletions(-)
> > >
> > > +uint64_t
> > > +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev) {
> > > + uint64_t offloads;
> > > + struct ixgbe_hw *hw =
> > > +IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > > +
> > > + offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
> >
> > As I can see I ixgbe all header_split code is enabled only if
> > RTE_HEADER_SPLIT_ENABLE is on.
> > It is off by default and I doubt anyone really using it these days.
> > So I think the best thing would be not to advertise
> > DEV_RX_OFFLOAD_HEADER_SPLIT for ixgbe at all, and probably remove
> > related code.
> > If you'd prefer to keep it, then at least we should set that capability only at
> > #ifdef RTE_HEADER_SPLIT_ENABLE.
> > Another thing - it should be per port, not per queue.
> > Thought I think better is just to remove it completely.
> I will set this header splitting capability in #ifdef RTE_HEADER_SPLIT_ENABLE in my next patch set.
> I think it is a per queue capability as it can be configured on the register IXGBE_SRRCTL of every Rx queue
> In this code line: IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); in ixgbe_dev_rx_init( ).
> Same case is also in the code line: IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl); in ixgbevf_dev_rx_init( ).
Yes, HW can enable/disable it on a per queue basis.
Though it affects rx function selection, and as right now we have one rx function per device -
That's why it looks to me more like a per port offload.
Though I believe these days ixgbe PMD doesn't support it properly anyway
(we always set rxd.hdr_addr to zero) - so probably better to remove it altogether.
>
> > > +static int
> > > +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> > > +requested) {
> > > + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> > > + uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> > > + uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> > > +
> > > + if ((requested & (queue_supported | port_supported)) != requested)
> > > + return 0;
> > > +
> > > + if ((port_offloads ^ requested) & port_supported)
> >
> > Could you explain a bit more what are you cheking here?
> > As I can see:
> > (port_offloads ^ requested) - that's a diff between already set and newly
> > requested offloads.
> > Then you check if that diff consists of supported by port offloads, and if yes
> > you return an error?
> > Konstantin
> >
> This function is similar to mlx4_check_rx_queue_offloads() in mlx4 driver.
> As the git log message in the commit ce17eddefc20285bbfe575bdc07f42f0b20f34cb say
> that a per port capability should has same setting (enabling or disabling) on both port
> configuration via rte_eth_dev_configure( ) and queue configuration via rte_eth_rx_queue_setup( ).
> This function check if this requirement is matched or not.
> It also check offloading request is supported as a per port or a per queue capability or not.
> If above checking is pass, it return 1 else return 0.
Ok, let me be more specific here.
Let say:
requested == DEV_RX_OFFLOAD_VLAN_STRIP;
port_offloads == DEV_RX_OFFLOAD_IPV4_CKSUM;
port_supported = (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER);
(port_offloads ^ requested) == DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM;
(port_offloads ^ requested) & port_supported == DEV_RX_OFFLOAD_IPV4_CKSUM;
And that function will return failure, while as I understand it shouldn't - requested queue offload is valid.
Konstantin
Hi, Konstantin
Thanks for your patient guidance!
> IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); in
> ixgbe_dev_rx_init( ).
> > Same case is also in the code line: IXGBE_WRITE_REG(hw,
> IXGBE_VFSRRCTL(i), srrctl); in ixgbevf_dev_rx_init( ).
>
> Yes, HW can enable/disable it on a per queue basis.
> Though it affects rx function selection, and as right now we have one rx
> function per device - That's why it looks to me more like a per port offload.
> Though I believe these days ixgbe PMD doesn't support it properly anyway
> (we always set rxd.hdr_addr to zero) - so probably better to remove it at all.
>
Yes, the rx function selection is related to offloading.
I'll remove this header split capability in my next patch set.
> >
> > > > +static int
> > > > +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> > > > +requested) {
> > > > + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> > > > + uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> > > > + uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> > > > +
> > > > + if ((requested & (queue_supported | port_supported)) != requested)
> > > > + return 0;
> > > > +
> > > > + if ((port_offloads ^ requested) & port_supported)
> > >
> > > Could you explain a bit more what are you cheking here?
> > > As I can see:
> > > (port_offloads ^ requested) - that's a diff between already set and
> > > newly requested offloads.
> > > Then you check if that diff consists of supported by port offloads,
> > > and if yes you return an error?
> > > Konstantin
> > >
> > This function is similar to mlx4_check_rx_queue_offloads() in mlx4 driver.
> > As the git log message in the commit
> > ce17eddefc20285bbfe575bdc07f42f0b20f34cb say that a per port
> > capability should has same setting (enabling or disabling) on both port
> configuration via rte_eth_dev_configure( ) and queue configuration via
> rte_eth_rx_queue_setup( ).
> > This function check if this requirement is matched or not.
> > It also check offloading request is supported as a per port or a per queue
> capability or not.
> > If above checking is pass, it return 1 else return 0.
>
> Ok, let be more specific here.
> Let say:
> requested == DEV_RX_OFFLOAD_VLAN_STRIP;
> port_offloads == DEV_RX_OFFLOAD_IPV4_CKSUM; port_supported =
> (DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_CRC_STRIP |
> DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_SCATTER);
>
> (port_offloads ^ requested) == DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_IPV4_CKSUM; (port_offloads ^ requested) &
> port_supported == DEV_RX_OFFLOAD_IPV4_CKSUM; And that function will
> return failure, while as I understand it shouldn't - requested queue offload is
> valid.
>
> Konstantin
I'd like to list the git message of commit ce17eddefc20285bbfe575bdc07f42f0b20f34cb which
has been submitted by Shahaf Shuler and already been accepted.
SHA-1: ce17eddefc20285bbfe575bdc07f42f0b20f34cb
* ethdev: introduce Rx queue offloads API
Introduce a new API to configure Rx offloads.
In the new API, offloads are divided into per-port and per-queue
offloads. The PMD reports capability for each of them.
Offloads are enabled using the existing DEV_RX_OFFLOAD_* flags.
To enable per-port offload, the offload should be set on both device
configuration and queue configuration. To enable per-queue offload, the
offloads can be set only on queue configuration.
Applications should set the ignore_offload_bitfield bit on rxmode
structure in order to move to the new API.
The old Rx offloads API is kept for the meanwhile, in order to enable a
smooth transition for PMDs and application to the new API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
In your example, IPV4_CKSUM is a per-port offload; it is
enabled in the port_offloads passed to rte_eth_dev_configure(), so it
should also be enabled in the offloads requested via rte_eth_rx_queue_setup( ).
So your example fails in this checking.
This function is very similar to priv_is_rx_queue_offloads_allowed( ) in /net/mlx5/mlx5_rxq.c
In the patch http://dpdk.org/dev/patchwork/patch/33386/ which has already been accepted.
@@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
static int
ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
if (mask & ETH_VLAN_STRIP_MASK) {
ixgbe_vlan_hw_strip_config(dev);
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
ixgbe_vlan_hw_extend_enable(dev);
else
ixgbe_vlan_hw_extend_disable(dev);
@@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint64_t rx_offloads;
int ret;
PMD_INIT_FUNC_TRACE();
@@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ ixgbe_dev_info_get(dev, &dev_info);
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
+
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
-
- /*
- * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
- * mode.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540) &&
- !RTE_ETH_DEV_SRIOV(dev).active)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIBRTE_SECURITY
- if (dev->security_ctx) {
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+ if (dev->security_ctx)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
- }
#endif
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint64_t rx_offloads;
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
+ ixgbevf_dev_info_get(dev, &dev_info);
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
+
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
@@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
bcnrc_val = 0;
}
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
- if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
- IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+ (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
IXGBE_MMW_SIZE_JUMBO_FRAME);
else
@@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!rx_conf->enable_scatter &&
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
+ uint64_t rx_offloads;
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
/* sanity checks */
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
@@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
if (reg != 0) {
@@ -2769,6 +2769,98 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
#endif
}
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (ixgbe_is_vf(dev) == 0)
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by 82599 and X540 PF devices in non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) &&
+ !RTE_ETH_DEV_SRIOV(dev).active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
+
+static int
+ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+ uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
+ uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
+
+ if ((requested & (queue_supported | port_supported)) != requested)
+ return 0;
+
+ if ((port_offloads ^ requested) & port_supported)
+ return 0;
+
+ return 1;
+}
+
int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2787,6 +2879,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported port offloads 0x%" PRIx64
+ " or supported queue offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ ixgbe_get_rx_port_offloads(dev),
+ ixgbe_get_rx_queue_offloads(dev));
+ return -ENOTSUP;
+ }
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -2816,8 +2920,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->offloads = rx_conf->offloads;
@@ -4575,7 +4679,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
- if (!rsc_capable && rx_conf->enable_lro) {
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
"support it");
return -EINVAL;
@@ -4583,7 +4687,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter of 4.6.7.2.1 of the Spec Rev.
* 3.0 RSC configuration requires HW CRC stripping being
@@ -4597,7 +4702,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RFCTL configuration */
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
- if ((rsc_capable) && (rx_conf->enable_lro))
+ if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
/*
* Since NFS packets coalescing is not supported - clear
* RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4610,7 +4715,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
- if (!rx_conf->enable_lro)
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
return 0;
/* Set RDRXCTL.RSCACKC bit */
@@ -4730,7 +4835,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->hw_strip_crc)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4738,7 +4843,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->jumbo_frame == 1) {
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
@@ -4758,6 +4863,12 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rx_conf->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -4766,7 +4877,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+ rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+ 0 : ETHER_CRC_LEN;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
@@ -4784,7 +4896,9 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure Header Split
*/
- if (rx_conf->header_split) {
+ if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+ /* add Header Split flag for ixgbe_set_rx_function() */
+ rx_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
uint32_t psrtype;
@@ -4827,9 +4941,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
- if (rx_conf->enable_scatter)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
/*
@@ -4844,7 +4960,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (rx_conf->hw_ip_checksum)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4854,7 +4970,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->hw_strip_crc)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5260,6 +5376,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -5290,6 +5407,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
struct ixgbe_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
@@ -5329,6 +5447,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
ixgbevf_rlpml_set_vf(hw,
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rxmode->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -5356,7 +5480,9 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure Header Split
*/
- if (dev->data->dev_conf.rxmode.header_split) {
+ if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+ /* add Header Split flag for ixgbe_set_rx_function() */
+ rxmode->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK);
@@ -5388,18 +5514,21 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.enable_scatter ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
- (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ (rxmode->max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
}
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
#ifdef RTE_HEADER_SPLIT_ENABLE
- if (dev->data->dev_conf.rxmode.header_split)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
/* Must setup the PSRTYPE register */
psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
@@ -307,5 +307,8 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+
#endif /* RTE_IXGBE_INC_VECTOR */
#endif /* _IXGBE_RXTX_H_ */
@@ -286,7 +286,7 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
return -1;
/* no header split support */
- if (rxmode->header_split == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
return 0;
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
/* no csum error report support */
- if (rxmode->hw_ip_checksum == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
return -1;
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);