[dpdk-dev] net/i40e: convert to new Rx offloads API

Message ID 20180302082011.62982-1-yanglong.wu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Helin Zhang

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation success Compilation OK

Commit Message

Yanglong Wu March 2, 2018, 8:20 a.m. UTC
  The ethdev offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Rx offloads API.

Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c    | 30 ++++++++++++++++++++++--------
 drivers/net/i40e/i40e_ethdev_vf.c | 22 +++++++++++++++-------
 drivers/net/i40e/i40e_flow.c      |  3 ++-
 drivers/net/i40e/i40e_rxtx.c      | 32 ++++++++++++++++++++++++++++----
 drivers/net/i40e/i40e_rxtx.h      |  1 +
 5 files changed, 68 insertions(+), 20 deletions(-)
  

Comments

Qi Zhang March 19, 2018, 6:12 a.m. UTC | #1
Hi Yanglong:



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yanglong Wu
> Sent: Friday, March 2, 2018 4:20 PM
> To: dev@dpdk.org
> Cc: Dai, Wei <wei.dai@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wu, Yanglong <yanglong.wu@intel.com>
> Subject: [dpdk-dev] [DPDK] net/i40e: convert to new Rx offloads API
>
> The ethdev offloads API has changed since:
> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
> This commit supports the new Rx offloads API.
>
> Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>

[...]

> +	if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
> +		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;



The if statement is not necessary here, since max_rx_pktlen is always I40E_FRAME_SIZE_MAX.
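
(A minimal sketch of the simplification being suggested here -- illustrative only,
not the submitted code: since I40E_FRAME_SIZE_MAX is well above ETHER_MAX_LEN, the
capability can simply be advertised unconditionally.)

	/* max_rx_pktlen is fixed at I40E_FRAME_SIZE_MAX, so this always holds */
	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;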



[...]

> +	if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
> +		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;



Same as above.



[...]

> +static int
> +i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
> +{
> +	struct rte_eth_dev_info dev_info;
> +	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
> +	uint64_t supported; /* All per port offloads */
> +
> +	dev->dev_ops->dev_infos_get(dev, &dev_info);
> +	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
> +	return !((mandatory ^ requested) & supported);
> +}



Would you explain the logic here? It seems hard to understand, and it would be
better to add a comment even if the code is correct.



Regards

Qi
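
For reference, the check can be read as follows. This is a hypothetical, commented
restatement with illustrative parameter names, not the code that was submitted:

	static int
	rx_queue_offloads_ok(uint64_t port_offloads,  /* rxmode.offloads from configure */
			     uint64_t queue_offloads, /* rx_conf->offloads for this queue */
			     uint64_t port_capa,      /* dev_info.rx_offload_capa */
			     uint64_t queue_capa)     /* dev_info.rx_queue_offload_capa */
	{
		/* With queue_capa a subset of port_capa (it is 0 in this patch),
		 * the XOR leaves exactly the port-only offloads, i.e. those that
		 * cannot be toggled per queue. */
		uint64_t port_only = port_capa ^ queue_capa;

		/* Accept the queue request only if it differs from the port-level
		 * configuration in bits that are adjustable per queue; any mismatch
		 * in a port-only offload is rejected. */
		return !((port_offloads ^ queue_offloads) & port_only);
	}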


Patch

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 508b4171c..3cfc6a5b6 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3176,6 +3176,7 @@  i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
 	dev_info->max_mac_addrs = vsi->max_macaddrs;
 	dev_info->max_vfs = pci_dev->max_vfs;
+	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
 		DEV_RX_OFFLOAD_VLAN_STRIP |
 		DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3183,7 +3184,13 @@  i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM |
 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_CRC_STRIP;
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_VLAN_EXTEND |
+		DEV_RX_OFFLOAD_VLAN_FILTER;
+
+	if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3210,6 +3217,7 @@  i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3329,7 +3337,8 @@  i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+	int qinq = dev->data->dev_conf.rxmode.offloads &
+		   DEV_RX_OFFLOAD_VLAN_EXTEND;
 	int ret = 0;
 
 	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3377,9 +3386,11 @@  i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_vsi *vsi = pf->main_vsi;
+	struct rte_eth_rxmode *rxmode;
 
+	rxmode = &dev->data->dev_conf.rxmode;
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3387,14 +3398,14 @@  i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
 			i40e_vsi_config_double_vlan(vsi, TRUE);
 			/* Set global registers with default ethertype. */
 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3641,6 +3652,7 @@  i40e_macaddr_add(struct rte_eth_dev *dev,
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_mac_filter_info mac_filter;
 	struct i40e_vsi *vsi;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	int ret;
 
 	/* If VMDQ not enabled or configured, return */
@@ -3659,7 +3671,7 @@  i40e_macaddr_add(struct rte_eth_dev *dev,
 	}
 
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
-	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 		mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 	else
 		mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11312,9 +11324,11 @@  i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 
 	if (frame_size > ETHER_MAX_LEN)
-		dev_data->dev_conf.rxmode.jumbo_frame = 1;
+		dev_data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev_data->dev_conf.rxmode.jumbo_frame = 0;
+		dev_data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index fd003fe01..d4f9bde1a 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1541,7 +1541,7 @@  i40evf_dev_configure(struct rte_eth_dev *dev)
 	/* For non-DPDK PF drivers, VF has no ability to disable HW
 	 * CRC strip, and is implicitly enabled by the PF.
 	 */
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 		if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
 		    (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1575,7 +1575,7 @@  i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	/* Vlan stripping setting */
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.hw_vlan_strip)
+		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			i40evf_enable_vlan_strip(dev);
 		else
 			i40evf_disable_vlan_strip(dev);
@@ -1732,7 +1732,7 @@  i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 	/**
 	 * Check if the jumbo frame and maximum packet length are set correctly
 	 */
-	if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
 		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1752,7 +1752,7 @@  i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 		}
 	}
 
-	if (dev_data->dev_conf.rxmode.enable_scatter ||
+	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
 	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -2189,6 +2189,7 @@  i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
 		DEV_RX_OFFLOAD_VLAN_STRIP |
 		DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2196,7 +2197,11 @@  i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM |
 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_CRC_STRIP;
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_SCATTER;
+	if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2219,6 +2224,7 @@  i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2649,9 +2655,11 @@  i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 
 	if (frame_size > ETHER_MAX_LEN)
-		dev_data->dev_conf.rxmode.jumbo_frame = 1;
+		dev_data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev_data->dev_conf.rxmode.jumbo_frame = 0;
+		dev_data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 16c47cf73..7b9f2bc1a 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@  static uint16_t
 i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+	int qinq = dev->data->dev_conf.rxmode.offloads &
+		   DEV_RX_OFFLOAD_VLAN_EXTEND;
 	uint64_t reg_r = 0;
 	uint16_t reg_id;
 	uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..6492368ae 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,18 @@  i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	return NULL;
 }
 
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	struct rte_eth_dev_info dev_info;
+	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+	uint64_t supported; /* All per port offloads */
+
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+	return !((mandatory ^ requested) & supported);
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -1712,6 +1724,18 @@  i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
+	struct rte_eth_dev_info dev_info;
+
+	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		dev->dev_ops->dev_infos_get(dev, &dev_info);
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1784,8 @@  i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2339,7 +2363,6 @@  i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 
 	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
 	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
 	txq->tx_tail = 0;
 	txq->nb_tx_used = 0;
 
@@ -2469,7 +2492,7 @@  i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 
 	len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
 	rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
-	if (data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2770,7 @@  i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@  struct i40e_rx_queue {
 	bool rx_deferred_start; /**< don't start this queue in dev start */
 	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 	uint8_t dcb_tc;         /**< Traffic class of rx queue */
+	uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
 };
 
 struct i40e_tx_entry {
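
As background for readers of this thread: with the new API, an application requests
these Rx offloads through rte_eth_conf.rxmode.offloads and rte_eth_rxconf.offloads
instead of the old rxmode bit-fields (hw_vlan_strip, jumbo_frame, ...). A minimal
sketch, assuming a valid i40e port_id and an initialized mempool mb_pool
(illustrative only, not part of the patch):

	#include <rte_ethdev.h>
	#include <rte_lcore.h>

	static int
	configure_rx_offloads(uint16_t port_id, struct rte_mempool *mb_pool)
	{
		struct rte_eth_conf conf = {
			.rxmode = {
				.max_rx_pkt_len = ETHER_MAX_LEN,
				.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
					    DEV_RX_OFFLOAD_CRC_STRIP,
			},
		};
		struct rte_eth_rxconf rxq_conf = {
			/* For offloads that i40e only supports per port, the
			 * per-queue request must repeat the port-level setting,
			 * otherwise i40e_dev_rx_queue_setup() returns -ENOTSUP. */
			.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
				    DEV_RX_OFFLOAD_CRC_STRIP,
		};
		int ret;

		ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
		if (ret < 0)
			return ret;

		return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
					      &rxq_conf, mb_pool);
	}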