[dpdk-dev,4/4] net/ixgbe: convert to new Tx offloads API
Checks
Commit Message
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Wei Dai <wei.dai@intel.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 40 +++++++------------------
drivers/net/ixgbe/ixgbe_ipsec.c | 5 +++-
drivers/net/ixgbe/ixgbe_rxtx.c | 65 +++++++++++++++++++++++++++++++++++++---
drivers/net/ixgbe/ixgbe_rxtx.h | 8 +++++
4 files changed, 83 insertions(+), 35 deletions(-)
Comments
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
> index 30095fa..d7f0535 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> @@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
> uint8_t hthresh; /**< Host threshold register. */
> uint8_t wthresh; /**< Write-back threshold reg. */
> uint32_t txq_flags; /**< Holds flags for this TXq */
> + uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
> uint32_t ctx_curr; /**< Hardware context states. */
> /** Hardware context0 history. */
> struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
> @@ -254,6 +255,12 @@ struct ixgbe_txq_ops {
> #define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
> ETH_TXQ_FLAGS_NOOFFLOADS)
>
> +#define IXGBE_SIMPLE_TX_OFFLOAD_FLAGS ((uint64_t)DEV_TX_OFFLOAD_MULTI_SEGS |\
> + DEV_TX_OFFLOAD_VLAN_INSERT |\
> + DEV_TX_OFFLOAD_SCTP_CKSUM |\
> + DEV_TX_OFFLOAD_UDP_CKSUM |\
> + DEV_TX_OFFLOAD_TCP_CKSUM)
Hmm, and why are IP_CKSUM, TSO, OUTER_IP_CKSUM, etc. not included in that macro?
In fact do you really need that?
As I understand it, right now vector TX doesn't support any offloads, so checking
tx_offload != 0 should be enough for tx function selection, right?
Konstantin
> +
> /*
> * Populate descriptors with the following info:
> * 1.) buffer_addr = phys_addr + headroom
> @@ -307,6 +314,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
> uint16_t nb_pkts);
> int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
>
> +uint64_t ixgbe_get_tx_port_offlaods(struct rte_eth_dev *dev);
> uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
> uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
>
> --
> 2.7.5
Hi, Konstantin
Thanks for your feedback.
> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Thursday, March 15, 2018 7:19 AM
> To: Dai, Wei <wei.dai@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 4/4] net/ixgbe: convert to new Tx offloads API
>
>
> > diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h
> > b/drivers/net/ixgbe/ixgbe_rxtx.h index 30095fa..d7f0535 100644
> > --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> > +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> > @@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
> > uint8_t hthresh; /**< Host threshold register. */
> > uint8_t wthresh; /**< Write-back threshold reg.
> */
> > uint32_t txq_flags; /**< Holds flags for this TXq */
> > + uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
> > uint32_t ctx_curr; /**< Hardware context states.
> */
> > /** Hardware context0 history. */
> > struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; @@ -254,6
> +255,12
> > @@ struct ixgbe_txq_ops { #define IXGBE_SIMPLE_FLAGS
> > ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
> > ETH_TXQ_FLAGS_NOOFFLOADS)
> >
> > +#define IXGBE_SIMPLE_TX_OFFLOAD_FLAGS
> ((uint64_t)DEV_TX_OFFLOAD_MULTI_SEGS |\
> > + DEV_TX_OFFLOAD_VLAN_INSERT |\
> > + DEV_TX_OFFLOAD_SCTP_CKSUM |\
> > + DEV_TX_OFFLOAD_UDP_CKSUM |\
> > + DEV_TX_OFFLOAD_TCP_CKSUM)
>
>
> Hmm and why IP_CKSUM, TSO, OUTER_IP_CKSUM, etc. is not included into
> that macro?
> In fact do you really need that?
> As I understand right now vector TX doesn't support any offloads, so
> tx_offload != 0, should be enough for tx function selection, right?
> Konstanitn
In this patch, IXGBE_SIMPLE_TX_OFFLOAD_FLAGS is just a reverse transform of
IXGBE_SIMPLE_FLAGS, which is used in the old offload API.
Yes, current vector Tx doesn't support any offloads.
I will use tx_offload == 0 as you suggested.
>
> > +
> > /*
> > * Populate descriptors with the following info:
> > * 1.) buffer_addr = phys_addr + headroom @@ -307,6 +314,7 @@
> > uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf
> **tx_pkts,
> > uint16_t nb_pkts);
> > int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
> >
> > +uint64_t ixgbe_get_tx_port_offlaods(struct rte_eth_dev *dev);
> > uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
> > uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
> >
> > --
> > 2.7.5
@@ -3647,28 +3647,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
-
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
- if (dev->security_ctx)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-#endif
+ dev_info->tx_queue_offload_capa = 0;
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offlaods(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3690,7 +3670,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ ETH_TXQ_FLAGS_NOOFFLOADS |
+ ETH_TXQ_FLAGS_IGNORE,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -3774,12 +3756,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_queue_offload_capa = 0;
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offlaods(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3801,7 +3779,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ ETH_TXQ_FLAGS_NOOFFLOADS |
+ ETH_TXQ_FLAGS_IGNORE,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -599,8 +599,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
uint64_t rx_offloads;
+ uint64_t tx_offloads;
rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+
/* sanity checks */
if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
@@ -634,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
return -1;
}
}
- if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
IXGBE_SECTXCTRL_STORE_FORWARD);
reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+ if (((txq->offloads & IXGBE_SIMPLE_TX_OFFLOAD_FLAGS) == 0) &&
#ifdef RTE_LIBRTE_SECURITY
!(txq->using_ipsec) &&
#endif
@@ -2398,9 +2398,10 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
PMD_INIT_LOG(DEBUG,
- " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
- (unsigned long)txq->txq_flags,
- (unsigned long)IXGBE_SIMPLE_FLAGS);
+ " - offloads = 0x%" PRIx64
+ " [IXGBE_SIMPLE_TX_OFFLOAD_FLAGS=0x%" PRIx64 "]",
+ txq->offloads,
+ IXGBE_SIMPLE_TX_OFFLOAD_FLAGS);
PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2411,45 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
}
}
+/*
+ * Build the set of DEV_TX_OFFLOAD_* capability flags for the port.
+ *
+ * Base set: VLAN insertion, IPv4/UDP/TCP/SCTP checksum and TCP TSO.
+ * 82599EB/X540 additionally report MACsec insertion; the X550 family
+ * (X550, X550EM_x, X550EM_a) additionally reports outer IPv4 checksum.
+ * DEV_TX_OFFLOAD_SECURITY is reported only when the device has a
+ * security context (RTE_LIBRTE_SECURITY builds).
+ *
+ * NOTE(review): "offlaods" is a typo for "offloads".  Renaming it must
+ * be done in a single change together with the header declaration and
+ * every call site of this function, otherwise the build breaks.
+ */
+uint64_t
+ixgbe_get_tx_port_offlaods(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ /* MACsec insertion is only present on 82599 and X540 MACs. */
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ /* Outer IPv4 checksum offload is an X550-family feature. */
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ /* Inline IPsec Tx offload requires a registered security context. */
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
+
+/*
+ * Verify the Tx offloads requested for a queue:
+ * - the queue must carry exactly the port-wide offloads configured in
+ *   dev_conf.txmode.offloads (tx_queue_offload_capa is 0 for this
+ *   driver, so a queue cannot deviate from the port configuration), and
+ * - nothing outside the port's capabilities may be requested.
+ * Returns non-zero when the requested set is acceptable.
+ */
+static int
+ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported = ixgbe_get_tx_port_offlaods(dev);
+
+ /* Reject offload bits the port cannot do at all; the masked XOR
+  * below would otherwise silently ignore unsupported requests,
+  * contradicting the -ENOTSUP error path in the caller.
+  */
+ if (requested & ~supported)
+ return 0;
+
+ return !((mandatory ^ requested) & supported);
+}
+
int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2426,6 +2466,21 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/*
+ * Don't verify port offloads for application which
+ * use the old API.
+ */
+ if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ ixgbe_get_tx_port_offlaods(dev));
+ return -ENOTSUP;
+ }
+
+ /*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
* of IXGBE_ALIGN.
@@ -2551,6 +2606,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = tx_conf->offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_SECURITY
@@ -5382,6 +5438,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
@@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
uint32_t txq_flags; /**< Holds flags for this TXq */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -254,6 +255,12 @@ struct ixgbe_txq_ops {
#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
ETH_TXQ_FLAGS_NOOFFLOADS)
+#define IXGBE_SIMPLE_TX_OFFLOAD_FLAGS ((uint64_t)DEV_TX_OFFLOAD_MULTI_SEGS |\
+ DEV_TX_OFFLOAD_VLAN_INSERT |\
+ DEV_TX_OFFLOAD_SCTP_CKSUM |\
+ DEV_TX_OFFLOAD_UDP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_CKSUM)
+
/*
* Populate descriptors with the following info:
* 1.) buffer_addr = phys_addr + headroom
@@ -307,6 +314,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+uint64_t ixgbe_get_tx_port_offlaods(struct rte_eth_dev *dev);
uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);