[dpdk-dev,6/6] net/mlx4: convert to new Rx offloads API

Message ID 20171123120252.143695-7-shahafs@mellanox.com (mailing list archive)
State Superseded, archived
Headers

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Shahaf Shuler Nov. 23, 2017, 12:02 p.m. UTC
  Ethdev Rx offloads API has changed since:

commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")

This commit adds support for the new Rx offloads API.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
 drivers/net/mlx4/mlx4.c        | 11 ++++++
 drivers/net/mlx4/mlx4_ethdev.c | 10 ++---
 drivers/net/mlx4/mlx4_flow.c   |  5 ++-
 drivers/net/mlx4/mlx4_rxq.c    | 78 ++++++++++++++++++++++++++++++++++---
 drivers/net/mlx4/mlx4_rxtx.h   |  2 +
 5 files changed, 93 insertions(+), 13 deletions(-)
  

Patch

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 38c545b1b..3205b58ac 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -101,6 +101,10 @@  mlx4_dev_configure(struct rte_eth_dev *dev)
 	struct rte_flow_error error;
 	uint64_t supp_tx_offloads = mlx4_priv_get_tx_port_offloads(priv);
 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t supp_rx_offloads =
+				(mlx4_get_rx_port_offloads() |
+				 mlx4_priv_get_rx_queue_offloads(priv));
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret;
 
 	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
@@ -110,6 +114,13 @@  mlx4_dev_configure(struct rte_eth_dev *dev)
 		      tx_offloads, supp_tx_offloads);
 		return -rte_errno;
 	}
+	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+		rte_errno = ENOTSUP;
+		ERROR("Some Rx offloads are not supported "
+		      "requested 0x%lx supported 0x%lx\n",
+		      rx_offloads, supp_rx_offloads);
+		return -rte_errno;
+	}
 	/* Prepare internal flow rules. */
 	ret = mlx4_flow_sync(priv, &error);
 	if (ret) {
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 63e00b1da..fef89e731 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -766,13 +766,11 @@  mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->max_rx_queues = max;
 	info->max_tx_queues = max;
 	info->max_mac_addrs = RTE_DIM(priv->mac);
-	info->rx_offload_capa = 0;
 	info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
-	if (priv->hw_csum) {
-		info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
-					  DEV_RX_OFFLOAD_UDP_CKSUM |
-					  DEV_RX_OFFLOAD_TCP_CKSUM);
-	}
+	info->rx_queue_offload_capa =
+				mlx4_priv_get_rx_queue_offloads(priv);
+	info->rx_offload_capa = (mlx4_get_rx_port_offloads() |
+				 info->rx_queue_offload_capa);
 	if (mlx4_get_ifname(priv, &ifname) == 0)
 		info->if_index = if_nametoindex(ifname);
 	info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 8b87b2989..654e72df3 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -1224,7 +1224,7 @@  mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
  * - MAC flow rules are generated from @p dev->data->mac_addrs
  *   (@p priv->mac array).
  * - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
+ * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
  *   is enabled and VLAN filters are configured.
  *
  * @param priv
@@ -1292,7 +1292,8 @@  mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
 	};
 	struct ether_addr *rule_mac = &eth_spec.dst;
 	rte_be16_t *rule_vlan =
-		priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
+		(priv->dev->data->dev_conf.rxmode.offloads &
+		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
 		!priv->dev->data->promiscuous ?
 		&vlan_spec.tci :
 		NULL;
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 53313c56f..f8c1105dc 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -663,6 +663,66 @@  mlx4_rxq_detach(struct rxq *rxq)
 }
 
 /**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   Supported Rx offloads.
+ */
+uint64_t
+mlx4_priv_get_rx_queue_offloads(struct priv *priv)
+{
+	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER;
+
+	if (priv->hw_csum)
+		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+	return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @return
+ *   Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_port_offloads(void)
+{
+	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+	return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param offloads
+ *   Per-queue offloads configuration.
+ *
+ * @return
+ *   1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+	uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
+	uint64_t queue_supp_offloads =
+				mlx4_priv_get_rx_queue_offloads(priv);
+	uint64_t port_supp_offloads = mlx4_get_rx_port_offloads();
+
+	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+	     offloads)
+		return 0;
+	if (((port_offloads ^ offloads) & port_supp_offloads))
+		return 0;
+	return 1;
+}
+
+/**
  * DPDK callback to configure a Rx queue.
  *
  * @param dev
@@ -707,6 +767,16 @@  mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	(void)conf; /* Thresholds configuration (ignored). */
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
+	if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
+		rte_errno = ENOTSUP;
+		ERROR("%p: Rx queue offloads 0x%lx don't match port "
+		      "offloads 0x%lx or supported offloads 0x%lx",
+		      (void *)dev, conf->offloads,
+		      dev->data->dev_conf.rxmode.offloads,
+		      (mlx4_get_rx_port_offloads() |
+		       mlx4_priv_get_rx_queue_offloads(priv)));
+		return -rte_errno;
+	}
 	if (idx >= dev->data->nb_rx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -746,10 +816,8 @@  mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_n = rte_log2_u32(desc),
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
-		.csum = (priv->hw_csum &&
-			 dev->data->dev_conf.rxmode.hw_ip_checksum),
-		.csum_l2tun = (priv->hw_csum_l2tun &&
-			       dev->data->dev_conf.rxmode.hw_ip_checksum),
+		.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+		.csum_l2tun = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
 			.idx = idx,
@@ -761,7 +829,7 @@  mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
+	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 528600a18..4897e9471 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -163,6 +163,8 @@  int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 			const struct rte_eth_rxconf *conf,
 			struct rte_mempool *mp);
 void mlx4_rx_queue_release(void *dpdk_rxq);
+uint64_t mlx4_get_rx_port_offloads(void);
+uint64_t mlx4_priv_get_rx_queue_offloads(struct priv *priv);
 
 /* mlx4_rxtx.c */