[dpdk-dev] [PATCH 2/5] net/qede: enable IPGRE offload support

Rasesh Mody rasesh.mody at cavium.com
Wed Mar 28 02:15:52 CEST 2018


From: Harish Patil <harish.patil at cavium.com>

Signed-off-by: Harish Patil <harish.patil at cavium.com>
---
 drivers/net/qede/qede_ethdev.c |   49 ++++++++++++++++++++++++++++++++++------
 drivers/net/qede/qede_ethdev.h |    7 +++---
 drivers/net/qede/qede_rxtx.c   |   14 +++++-------
 drivers/net/qede/qede_rxtx.h   |    3 ++-
 4 files changed, 54 insertions(+), 19 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6a51e3d..f25fb91 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -782,6 +782,36 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
 }
 
 static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+		  bool enable)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	enum _ecore_status_t rc = ECORE_INVAL;
+	struct ecore_tunnel_info tunn;
+
+	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+	tunn.ip_gre.b_update_mode = true;
+	tunn.ip_gre.b_mode_enabled = enable;
+	tunn.ip_gre.tun_cls = clss;
+	tunn.b_update_rx_cls = true;
+	tunn.b_update_tx_cls = true;
+
+	rc = qede_tunnel_update(qdev, &tunn);
+	if (rc == ECORE_SUCCESS) {
+		qdev->ipgre.enable = enable;
+		DP_INFO(edev, "IPGRE is %s\n",
+			enable ? "enabled" : "disabled");
+	} else {
+		DP_ERR(edev, "Failed to update tunn_clss %u\n",
+		       clss);
+	}
+
+	return rc;
+}
+
+static int
 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
 		 enum rte_eth_tunnel_type tunn_type, bool enable)
 {
@@ -794,6 +824,9 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
 	case RTE_TUNNEL_TYPE_GENEVE:
 		rc = qede_geneve_enable(eth_dev, clss, enable);
 		break;
+	case RTE_TUNNEL_TYPE_IP_IN_GRE:
+		rc = qede_ipgre_enable(eth_dev, clss, enable);
+		break;
 	default:
 		rc = -EINVAL;
 		break;
@@ -2078,6 +2111,7 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		RTE_PTYPE_TUNNEL_VXLAN,
 		RTE_PTYPE_L4_FRAG,
 		RTE_PTYPE_TUNNEL_GENEVE,
+		RTE_PTYPE_TUNNEL_GRE,
 		/* Inner */
 		RTE_PTYPE_INNER_L2_ETHER,
 		RTE_PTYPE_INNER_L2_ETHER_VLAN,
@@ -2501,7 +2535,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 					ECORE_TUNN_CLSS_MAC_VLAN, false);
 
 		break;
-
 	case RTE_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
@@ -2591,7 +2624,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 
 		qdev->vxlan.udp_port = udp_port;
 		break;
-
 	case RTE_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
@@ -2629,7 +2661,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 
 		qdev->geneve.udp_port = udp_port;
 		break;
-
 	default:
 		return ECORE_INVAL;
 	}
@@ -2795,7 +2826,8 @@ static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
 			qdev->geneve.filter_type = conf->filter_type;
 		}
 
-		if (!qdev->vxlan.enable || !qdev->geneve.enable)
+		if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+		    !qdev->ipgre.enable)
 			return qede_tunn_enable(eth_dev, clss,
 						conf->tunnel_type,
 						true);
@@ -2831,15 +2863,14 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
 		switch (filter_conf->tunnel_type) {
 		case RTE_TUNNEL_TYPE_VXLAN:
 		case RTE_TUNNEL_TYPE_GENEVE:
+		case RTE_TUNNEL_TYPE_IP_IN_GRE:
 			DP_INFO(edev,
 				"Packet steering to the specified Rx queue"
 				" is not supported with UDP tunneling");
 			return(qede_tunn_filter_config(eth_dev, filter_op,
 						      filter_conf));
-		/* Place holders for future tunneling support */
 		case RTE_TUNNEL_TYPE_TEREDO:
 		case RTE_TUNNEL_TYPE_NVGRE:
-		case RTE_TUNNEL_TYPE_IP_IN_GRE:
 		case RTE_L2_TUNNEL_TYPE_E_TAG:
 			DP_ERR(edev, "Unsupported tunnel type %d\n",
 				filter_conf->tunnel_type);
@@ -3138,19 +3169,23 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	/* VF tunnel offloads is enabled by default in PF driver */
 	adapter->vxlan.num_filters = 0;
 	adapter->geneve.num_filters = 0;
+	adapter->ipgre.num_filters = 0;
 	if (is_vf) {
 		adapter->vxlan.enable = true;
 		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
 					     ETH_TUNNEL_FILTER_IVLAN;
 		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
 		adapter->geneve.enable = true;
-
 		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
 					      ETH_TUNNEL_FILTER_IVLAN;
 		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+		adapter->ipgre.enable = true;
+		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
+					     ETH_TUNNEL_FILTER_IVLAN;
 	} else {
 		adapter->vxlan.enable = false;
 		adapter->geneve.enable = false;
+		adapter->ipgre.enable = false;
 	}
 
 	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 23f7e0e..baae22d 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -170,7 +170,7 @@ struct qede_fdir_info {
 #define QEDE_VXLAN_DEF_PORT			(4789)
 #define QEDE_GENEVE_DEF_PORT			(6081)
 
-struct qede_udp_tunn {
+struct qede_tunn_params {
 	bool enable;
 	uint16_t num_filters;
 	uint16_t filter_type;
@@ -205,8 +205,9 @@ struct qede_dev {
 	SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
 	uint16_t num_uc_addr;
 	bool handle_hw_err;
-	struct qede_udp_tunn vxlan;
-	struct qede_udp_tunn geneve;
+	struct qede_tunn_params vxlan;
+	struct qede_tunn_params geneve;
+	struct qede_tunn_params ipgre;
 	struct qede_fdir_info fdir_info;
 	bool vlan_strip_flg;
 	char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index e9fe46c..20c10be 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1837,17 +1837,14 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 		 * offloads. Don't rely on pkt_type marked by Rx, instead use
 		 * tx_ol_flags to decide.
 		 */
-		if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-						PKT_TX_TUNNEL_VXLAN) ||
-		    ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-						PKT_TX_TUNNEL_MPLSINUDP) ||
-		    ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-						PKT_TX_TUNNEL_GENEVE)) {
+		tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+
+		if (tunn_flg) {
 			/* Check against max which is Tunnel IPv6 + ext */
 			if (unlikely(txq->nb_tx_avail <
 				ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
 					break;
-			tunn_flg = true;
+
 			/* First indicate its a tunnel pkt */
 			bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
 				  ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
@@ -1986,7 +1983,8 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 			 * csum offload is requested then we need to force
 			 * recalculation of L4 tunnel header csum also.
 			 */
-			if (tunn_flg) {
+			if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
+							PKT_TX_TUNNEL_GRE)) {
 				bd1_bd_flags_bf |=
 					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
 					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index a2dc9e7..3c66df0 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -151,7 +151,8 @@
 			      PKT_TX_VLAN_PKT		| \
 			      PKT_TX_TUNNEL_VXLAN	| \
 			      PKT_TX_TUNNEL_GENEVE	| \
-			      PKT_TX_TUNNEL_MPLSINUDP)
+			      PKT_TX_TUNNEL_MPLSINUDP   | \
+			      PKT_TX_TUNNEL_GRE)
 
 #define QEDE_TX_OFFLOAD_NOTSUP_MASK \
 	(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
-- 
1.7.10.3



More information about the dev mailing list