[dpdk-dev] [PATCH v2 18/19] net/bnxt: add support for flow filter ops

Ajit Khaparde ajit.khaparde at broadcom.com
Mon Sep 18 17:17:54 CEST 2017


This patch adds support for the flow validate/create/destroy/flush ops.
Also update doc/guides/nics/features/bnxt.ini to advertise Flow API support.
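
For context, a minimal application-side sketch of a flow these ops will
accept (the port ID, addresses and queue index below are illustrative
only, and the snippet assumes the current API where port IDs are
uint8_t). The parser requires a spec and a mask for every pattern item,
an ingress-only attribute, and a single QUEUE or DROP action:

#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

/* Steer TCP/IPv4 packets for 10.0.0.1:80 to Rx queue 1. */
static struct rte_flow *
flow_example(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validate first; create takes the same arguments. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}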

Signed-off-by: Ajit Khaparde <ajit.khaparde at broadcom.com>
---
v1->v2: incorporate review comments.
---
 doc/guides/nics/features/bnxt.ini |   1 +
 drivers/net/bnxt/bnxt.h           |   7 +
 drivers/net/bnxt/bnxt_ethdev.c    |  50 ++-
 drivers/net/bnxt/bnxt_filter.c    | 820 +++++++++++++++++++++++++++++++++++++-
 drivers/net/bnxt/bnxt_filter.h    |  70 ++++
 drivers/net/bnxt/bnxt_hwrm.c      | 281 ++++++++++++-
 drivers/net/bnxt/bnxt_hwrm.h      |  12 +-
 drivers/net/bnxt/bnxt_vnic.c      |   1 +
 drivers/net/bnxt/bnxt_vnic.h      |   1 +
 drivers/net/bnxt/rte_pmd_bnxt.c   |   4 +-
 10 files changed, 1213 insertions(+), 34 deletions(-)

diff --git a/doc/guides/nics/features/bnxt.ini b/doc/guides/nics/features/bnxt.ini
index 089d914..793322f 100644
--- a/doc/guides/nics/features/bnxt.ini
+++ b/doc/guides/nics/features/bnxt.ini
@@ -16,6 +16,7 @@ Multicast MAC filter = Y
 RSS reta update      = Y
 SR-IOV               = Y
 VLAN filter          = Y
+Flow API             = Y
 VLAN offload         = Y
 Basic stats          = Y
 Extended stats       = Y
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 4a88210..f33df2d 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -171,6 +171,12 @@ struct bnxt_cos_queue_info {
 	uint8_t	profile;
 };
 
+struct rte_flow {
+	STAILQ_ENTRY(rte_flow) next;
+	struct bnxt_filter_info *filter;
+	struct bnxt_vnic_info	*vnic;
+};
+
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
 struct bnxt {
 	void				*bar0;
@@ -271,4 +277,5 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
 #define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG		0x6
 
 bool is_bnxt_supported(struct rte_eth_dev *dev);
+extern const struct rte_flow_ops bnxt_flow_ops;
 #endif
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index daf7990..d5d3467 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -611,7 +611,7 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
 				if (filter->mac_index == index) {
 					STAILQ_REMOVE(&vnic->filter, filter,
 						      bnxt_filter_info, next);
-					bnxt_hwrm_clear_filter(bp, filter);
+					bnxt_hwrm_clear_l2_filter(bp, filter);
 					filter->mac_index = INVALID_MAC_INDEX;
 					memset(&filter->l2_addr, 0,
 					       ETHER_ADDR_LEN);
@@ -658,7 +658,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
 	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
 	filter->mac_index = index;
 	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
-	return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
 }
 
 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
@@ -1152,7 +1152,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
 					/* Must delete the filter */
 					STAILQ_REMOVE(&vnic->filter, filter,
 						      bnxt_filter_info, next);
-					bnxt_hwrm_clear_filter(bp, filter);
+					bnxt_hwrm_clear_l2_filter(bp, filter);
 					STAILQ_INSERT_TAIL(
 							&bp->free_filter_list,
 							filter, next);
@@ -1178,7 +1178,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
 					memcpy(new_filter->l2_addr,
 					       filter->l2_addr, ETHER_ADDR_LEN);
 					/* MAC only filter */
-					rc = bnxt_hwrm_set_filter(bp,
+					rc = bnxt_hwrm_set_l2_filter(bp,
 							vnic->fw_vnic_id,
 							new_filter);
 					if (rc)
@@ -1230,7 +1230,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
 					/* Must delete the MAC filter */
 					STAILQ_REMOVE(&vnic->filter, filter,
 						      bnxt_filter_info, next);
-					bnxt_hwrm_clear_filter(bp, filter);
+					bnxt_hwrm_clear_l2_filter(bp, filter);
 					filter->l2_ovlan = 0;
 					STAILQ_INSERT_TAIL(
 							&bp->free_filter_list,
@@ -1253,8 +1253,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
 				new_filter->l2_ovlan = vlan_id;
 				new_filter->l2_ovlan_mask = 0xF000;
 				new_filter->enables |= en;
-				rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id,
-							  new_filter);
+				rc = bnxt_hwrm_set_l2_filter(bp,
+							     vnic->fw_vnic_id,
+							     new_filter);
 				if (rc)
 					goto exit;
 				RTE_LOG(INFO, PMD,
@@ -1333,7 +1334,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
 		/* Default Filter is at Index 0 */
 		if (filter->mac_index != 0)
 			continue;
-		rc = bnxt_hwrm_clear_filter(bp, filter);
+		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 		if (rc)
 			break;
 		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
@@ -1342,7 +1343,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
 		filter->enables |=
 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
-		rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
 		if (rc)
 			break;
 		filter->mac_index = 0;
@@ -1642,6 +1643,36 @@ bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
 	return RTE_ETH_TX_DESC_FULL;
 }
 
+static int
+bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
+		    enum rte_filter_type filter_type,
+		    enum rte_filter_op filter_op, void *arg)
+{
+	int ret = 0;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_ETHERTYPE:
+	case RTE_ETH_FILTER_NTUPLE:
+	case RTE_ETH_FILTER_FDIR:
+	case RTE_ETH_FILTER_TUNNEL:
+		/* FALLTHROUGH */
+		RTE_LOG(ERR, PMD,
+			"filter type: %d: To be implemented\n", filter_type);
+		break;
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &bnxt_flow_ops;
+		break;
+	default:
+		RTE_LOG(ERR, PMD,
+			"Filter type (%d) not supported\n", filter_type);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
 /*
  * Initialization
  */
@@ -1694,6 +1725,7 @@ static const struct eth_dev_ops bnxt_dev_ops = {
 	.rx_queue_count = bnxt_rx_queue_count_op,
 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
+	.filter_ctrl = bnxt_filter_ctrl_op,
 };
 
 static bool bnxt_vf_pciid(uint16_t id)
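
Note: bnxt_filter_ctrl_op() above is the hook through which the generic
filter API hands out the flow ops table. A short sketch of the
application-visible effect (illustrative fragment; "port_id" is assumed
to be a configured bnxt port):

	const struct rte_flow_ops *ops = NULL;

	/* The rte_flow layer issues this exact query internally; with
	 * this patch the bnxt PMD answers RTE_ETH_FILTER_GENERIC with
	 * &bnxt_flow_ops instead of failing.
	 */
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) == 0 &&
	    ops != NULL)
		printf("port %u supports rte_flow\n",
		       (unsigned int)port_id);
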
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index e9aac27..47fc2da 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -35,6 +35,9 @@
 
 #include <rte_log.h>
 #include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
 
 #include "bnxt.h"
 #include "bnxt_filter.h"
@@ -94,6 +97,8 @@ void bnxt_init_filters(struct bnxt *bp)
 	for (i = 0; i < max_filters; i++) {
 		filter = &bp->filter_info[i];
 		filter->fw_l2_filter_id = -1;
+		filter->fw_em_filter_id = -1;
+		filter->fw_ntuple_filter_id = -1;
 		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
 	}
 }
@@ -121,7 +126,7 @@ void bnxt_free_all_filters(struct bnxt *bp)
 
 	for (i = 0; i < bp->pf.max_vfs; i++) {
 		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
-			bnxt_hwrm_clear_filter(bp, filter);
+			bnxt_hwrm_clear_l2_filter(bp, filter);
 		}
 	}
 }
@@ -142,7 +147,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
 		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
 			RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
 			/* Call HWRM to try to free filter again */
-			rc = bnxt_hwrm_clear_filter(bp, filter);
+			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 			if (rc)
 				RTE_LOG(ERR, PMD,
 				       "HWRM filter cannot be freed rc = %d\n",
@@ -174,3 +179,814 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
 	bp->filter_info = filter_mem;
 	return 0;
 }
+
+static struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
+{
+	struct bnxt_filter_info *filter;
+
+	/* Find the first unused filter in the free_filter_list pool */
+	filter = STAILQ_FIRST(&bp->free_filter_list);
+	if (!filter) {
+		RTE_LOG(ERR, PMD, "No more free filter resources\n");
+		return NULL;
+	}
+	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
+
+	return filter;
+}
+
+static void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
+{
+	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
+}
+
+static int
+bnxt_flow_args_validate(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error)
+{
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+			NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static const struct rte_flow_item *
+nxt_non_void_pattern(const struct rte_flow_item *cur)
+{
+	while (1) {
+		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
+			return cur;
+		cur++;
+	}
+}
+
+static const struct rte_flow_action *
+nxt_non_void_action(const struct rte_flow_action *cur)
+{
+	while (1) {
+		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
+			return cur;
+		cur++;
+	}
+}
+
+static inline int check_zero_bytes(const uint8_t *bytes, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (bytes[i] != 0x00)
+			return 0;
+	return 1;
+}
+
+static int
+bnxt_filter_type_check(const struct rte_flow_item pattern[],
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
+	int use_ntuple = 1;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			use_ntuple = 1;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			use_ntuple = 0;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+		case RTE_FLOW_ITEM_TYPE_TCP:
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			/* FALLTHROUGH */
+			/* need ntuple match, reset exact match */
+			if (!use_ntuple) {
+				RTE_LOG(ERR, PMD,
+					"VLAN flow cannot use NTUPLE filter\n");
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Cannot use VLAN with NTUPLE");
+				return -rte_errno;
+			}
+			use_ntuple |= 1;
+			break;
+		default:
+			RTE_LOG(ERR, PMD, "Unknown Flow type\n");
+			use_ntuple |= 1;
+		}
+		item++;
+	}
+	return use_ntuple;
+}
+
+static int
+bnxt_validate_and_parse_flow_type(const struct rte_flow_item pattern[],
+				  struct rte_flow_error *error,
+				  struct bnxt_filter_info *filter)
+{
+	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec;
+	const struct rte_flow_item_nvgre *nvgre_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec;
+	const struct rte_flow_item_vxlan *vxlan_mask;
+	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+	uint32_t tenant_id_be = 0;
+	bool vni_masked = 0;
+	bool tni_masked = 0;
+	int use_ntuple;
+	uint32_t en = 0;
+
+	use_ntuple = bnxt_filter_type_check(pattern, error);
+	RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
+	if (use_ntuple < 0)
+		return use_ntuple;
+
+	filter->filter_type = use_ntuple ?
+		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (item->last) {
+			/* last or range is NOT supported as match criteria */
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "No support for range");
+			return -rte_errno;
+		}
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "spec/mask is NULL");
+			return -rte_errno;
+		}
+		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = (const struct rte_flow_item_eth *)item->spec;
+			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+			/* Source and destination MAC address masks must not
+			 * be partially set: each must be either all 0's or
+			 * all 1's.
+			 */
+			if ((!is_zero_ether_addr(&eth_mask->src) &&
+			     !is_broadcast_ether_addr(&eth_mask->src)) ||
+			    (!is_zero_ether_addr(&eth_mask->dst) &&
+			     !is_broadcast_ether_addr(&eth_mask->dst))) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "MAC_addr mask not valid");
+				return -rte_errno;
+			}
+
+			/* Ethertype mask must be all 1's (exact match only) */
+			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "ethertype mask not valid");
+				return -rte_errno;
+			}
+
+			if (is_broadcast_ether_addr(&eth_mask->dst)) {
+				rte_memcpy(filter->dst_macaddr,
+					   &eth_spec->dst, 6);
+				en |= use_ntuple ?
+					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
+					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
+				RTE_LOG(ERR, PMD, "DST MAC\n");
+			}
+			if (is_broadcast_ether_addr(&eth_mask->src)) {
+				rte_memcpy(filter->src_macaddr,
+					   &eth_spec->src, 6);
+				en |= use_ntuple ?
+					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
+					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
+				RTE_LOG(ERR, PMD, "SRC MAC\n");
+			}
+			if (eth_spec->type) {
+				filter->ethertype =
+					rte_be_to_cpu_16(eth_spec->type);
+				en |= use_ntuple ?
+					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
+					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec =
+				(const struct rte_flow_item_vlan *)item->spec;
+			vlan_mask =
+				(const struct rte_flow_item_vlan *)item->mask;
+			if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
+				/* Only the VLAN ID can be matched. */
+				filter->l2_ovlan =
+					rte_be_to_cpu_16(vlan_spec->tci) &
+					0xFFF;
+				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+			} else {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "VLAN mask is invalid");
+				return -rte_errno;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			/* If mask is not involved, we could use EM filters. */
+			ipv4_spec =
+				(const struct rte_flow_item_ipv4 *)item->spec;
+			ipv4_mask =
+				(const struct rte_flow_item_ipv4 *)item->mask;
+			/* Only IP DST and SRC fields are maskable. */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.type_of_service ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.fragment_offset ||
+			    ipv4_mask->hdr.time_to_live ||
+			    ipv4_mask->hdr.next_proto_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return -rte_errno;
+			}
+			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
+			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
+			if (use_ntuple)
+				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+			else
+				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+			if (ipv4_mask->hdr.src_addr) {
+				filter->src_ipaddr_mask[0] =
+					ipv4_mask->hdr.src_addr;
+				en |= !use_ntuple ? 0 :
+				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+			}
+			if (ipv4_mask->hdr.dst_addr) {
+				filter->dst_ipaddr_mask[0] =
+					ipv4_mask->hdr.dst_addr;
+				en |= !use_ntuple ? 0 :
+				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+			}
+			filter->ip_addr_type = use_ntuple ?
+			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
+			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+			if (ipv4_spec->hdr.next_proto_id) {
+				filter->ip_protocol =
+					ipv4_spec->hdr.next_proto_id;
+				if (use_ntuple)
+					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+				else
+					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec =
+				(const struct rte_flow_item_ipv6 *)item->spec;
+			ipv6_mask =
+				(const struct rte_flow_item_ipv6 *)item->mask;
+
+			/* Only IP DST and SRC fields are maskable. */
+			if (ipv6_mask->hdr.vtc_flow ||
+			    ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.proto ||
+			    ipv6_mask->hdr.hop_limits) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask.");
+				return -rte_errno;
+			}
+
+			if (use_ntuple)
+				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+			else
+				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+			rte_memcpy(filter->src_ipaddr,
+				   ipv6_spec->hdr.src_addr, 16);
+			rte_memcpy(filter->dst_ipaddr,
+				   ipv6_spec->hdr.dst_addr, 16);
+			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
+				rte_memcpy(filter->src_ipaddr_mask,
+					   ipv6_mask->hdr.src_addr, 16);
+				en |= !use_ntuple ? 0 :
+				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+			}
+			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
+				rte_memcpy(filter->dst_ipaddr_mask,
+					   ipv6_mask->hdr.dst_addr, 16);
+				en |= !use_ntuple ? 0 :
+				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+			}
+			filter->ip_addr_type = use_ntuple ?
+				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
+				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+			/* Check TCP mask. Only DST & SRC ports are maskable */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid TCP mask");
+				return -rte_errno;
+			}
+			filter->src_port = tcp_spec->hdr.src_port;
+			filter->dst_port = tcp_spec->hdr.dst_port;
+			if (use_ntuple)
+				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+			else
+				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+			if (tcp_mask->hdr.dst_port) {
+				filter->dst_port_mask = tcp_mask->hdr.dst_port;
+				en |= !use_ntuple ? 0 :
+				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+			}
+			if (tcp_mask->hdr.src_port) {
+				filter->src_port_mask = tcp_mask->hdr.src_port;
+				en |= !use_ntuple ? 0 :
+				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = (const struct rte_flow_item_udp *)item->spec;
+			udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid UDP mask");
+				return -rte_errno;
+			}
+
+			filter->src_port = udp_spec->hdr.src_port;
+			filter->dst_port = udp_spec->hdr.dst_port;
+			if (use_ntuple)
+				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+			else
+				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+
+			if (udp_mask->hdr.dst_port) {
+				filter->dst_port_mask = udp_mask->hdr.dst_port;
+				en |= !use_ntuple ? 0 :
+				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+			}
+			if (udp_mask->hdr.src_port) {
+				filter->src_port_mask = udp_mask->hdr.src_port;
+				en |= !use_ntuple ? 0 :
+				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec =
+				(const struct rte_flow_item_vxlan *)item->spec;
+			vxlan_mask =
+				(const struct rte_flow_item_vxlan *)item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return -rte_errno;
+			}
+
+			if (vxlan_spec &&
+			    (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
+			     vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
+			     vxlan_spec->flags != 0x8)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return -rte_errno;
+			}
+
+			/* Check if VNI is masked. */
+			if (vxlan_spec && vxlan_mask) {
+				vni_masked =
+					!!memcmp(vxlan_mask->vni, vni_mask,
+						 RTE_DIM(vni_mask));
+				if (vni_masked) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid VNI mask");
+					return -rte_errno;
+				}
+
+				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+					   vxlan_spec->vni, 3);
+				filter->vni =
+					rte_be_to_cpu_32(tenant_id_be);
+				filter->tunnel_type =
+				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec =
+				(const struct rte_flow_item_nvgre *)item->spec;
+			nvgre_mask =
+				(const struct rte_flow_item_nvgre *)item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return -rte_errno;
+			}
+
+			if (nvgre_spec &&
+			    (nvgre_spec->c_k_s_rsvd0_ver !=
+			     rte_cpu_to_be_16(0x2000) ||
+			     nvgre_spec->protocol !=
+			     rte_cpu_to_be_16(0x6558))) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return -rte_errno;
+			}
+
+			if (nvgre_spec && nvgre_mask) {
+				tni_masked =
+					!!memcmp(nvgre_mask->tni, tni_mask,
+						 RTE_DIM(tni_mask));
+				if (tni_masked) {
+					rte_flow_error_set(error, EINVAL,
+						       RTE_FLOW_ERROR_TYPE_ITEM,
+						       item,
+						       "Invalid TNI mask");
+					return -rte_errno;
+				}
+				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+					   nvgre_spec->tni, 3);
+				filter->vni =
+					rte_be_to_cpu_32(tenant_id_be);
+				filter->tunnel_type =
+				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
+			}
+			break;
+		default:
+			break;
+		}
+		item++;
+	}
+	filter->enables = en;
+
+	return 0;
+}
+
+/* Parse attributes */
+static int
+bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "No support for egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "No support for priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "No support for group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
+			     const struct rte_flow_item pattern[],
+			     const struct rte_flow_action actions[],
+			     const struct rte_flow_attr *attr,
+			     struct rte_flow_error *error,
+			     struct bnxt_filter_info *filter)
+{
+	const struct rte_flow_action *act = nxt_non_void_action(actions);
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	const struct rte_flow_action_queue *act_q;
+	struct bnxt_filter_info *flt;
+	struct bnxt_vnic_info *vnic, *vnic0;
+	int rc;
+
+	rc = bnxt_validate_and_parse_flow_type(pattern, error, filter);
+	if (rc != 0)
+		goto ret;
+
+	rc = bnxt_flow_parse_attr(attr, error);
+	if (rc != 0)
+		goto ret;
+	/* We support the ingress attribute only, for now. */
+	filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
+
+	switch (act->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		/* Allow this flow. Redirect to a VNIC. */
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		if (act_q->index >= bp->rx_nr_rings) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, act,
+					   "Invalid queue ID.");
+			rc = -rte_errno;
+			goto ret;
+		}
+		RTE_LOG(ERR, PMD, "Queue index %d\n", act_q->index);
+
+		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
+		if (vnic == NULL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, act,
+					   "No matching VNIC for queue ID.");
+			rc = -rte_errno;
+			goto ret;
+		}
+		filter->dst_id = vnic->fw_vnic_id;
+		flt = STAILQ_FIRST(&vnic0->filter);
+		filter->fw_l2_filter_id = flt->fw_l2_filter_id;
+		RTE_LOG(ERR, PMD, "VNIC found\n");
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		if (filter->filter_type == HWRM_CFA_EM_FILTER)
+			filter->flags |=
+				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
+		else
+			filter->flags |=
+				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
+				   "Invalid action.");
+		rc = -rte_errno;
+		goto ret;
+	}
+
+	act = nxt_non_void_action(++act);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Invalid action.");
+		rc = -rte_errno;
+		goto ret;
+	}
+ret:
+	return rc;
+}
+
+static int
+bnxt_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	struct bnxt_filter_info *filter;
+	int ret = 0;
+
+	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
+	if (ret != 0)
+		return ret;
+
+	filter = bnxt_get_unused_filter(bp);
+	if (filter == NULL) {
+		RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+		return -ENOMEM;
+	}
+
+	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+					   error, filter);
+	/* No need to hold on to this filter if we are just validating flow */
+	bnxt_free_filter(bp, filter);
+
+	return ret;
+}
+
+static struct rte_flow *
+bnxt_flow_create(struct rte_eth_dev *dev,
+		  const struct rte_flow_attr *attr,
+		  const struct rte_flow_item pattern[],
+		  const struct rte_flow_action actions[],
+		  struct rte_flow_error *error)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	struct bnxt_filter_info *filter;
+	struct bnxt_vnic_info *vnic;
+	struct rte_flow *flow;
+	unsigned int i;
+	int ret = 0;
+
+	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
+	if (ret != 0) {
+		RTE_LOG(ERR, PMD, "Not a valid flow.\n");
+		goto free_flow;
+	}
+
+	filter = bnxt_get_unused_filter(bp);
+	if (filter == NULL) {
+		RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+		ret = -ENOMEM;
+		goto free_flow;
+	}
+
+	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+					   error, filter);
+	if (ret != 0)
+		goto free_flow;
+
+	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
+		filter->enables |=
+			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
+	}
+	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+		filter->enables |=
+			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
+	}
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+		if (filter->dst_id == vnic->fw_vnic_id)
+			break;
+	}
+
+	if (!ret) {
+		flow->filter = filter;
+		flow->vnic = vnic;
+		RTE_LOG(DEBUG, PMD, "Successfully created flow.\n");
+		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+		return flow;
+	}
+free_flow:
+	RTE_LOG(ERR, PMD, "Failed to create flow.\n");
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	flow = NULL;
+	return flow;
+}
+
+static int
+bnxt_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	struct bnxt_filter_info *filter = flow->filter;
+	struct bnxt_vnic_info *vnic = flow->vnic;
+	int ret = 0;
+
+	if (filter->filter_type == HWRM_CFA_EM_FILTER)
+		ret = bnxt_hwrm_clear_em_filter(bp, filter);
+	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+	if (!ret) {
+		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
+		rte_free(flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+
+	return ret;
+}
+
+static int
+bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	struct bnxt_vnic_info *vnic;
+	struct rte_flow *flow;
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+		while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
+			struct bnxt_filter_info *filter = flow->filter;
+
+			if (filter->filter_type == HWRM_CFA_EM_FILTER)
+				ret = bnxt_hwrm_clear_em_filter(bp, filter);
+			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+			if (ret) {
+				rte_flow_error_set(error, -ret,
+						   RTE_FLOW_ERROR_TYPE_HANDLE,
+						   NULL,
+						   "Failed to flush flow in HW.");
+				return -rte_errno;
+			}
+
+			/* Remove from the head so the walk never
+			 * touches a freed element.
+			 */
+			STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
+			rte_free(flow);
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops bnxt_flow_ops = {
+	.validate = bnxt_flow_validate,
+	.create = bnxt_flow_create,
+	.destroy = bnxt_flow_destroy,
+	.flush = bnxt_flow_flush,
+};
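
For reference, the destroy/flush ops above map directly onto the public
API. A usage sketch (illustrative only; "flow" is assumed to have been
returned by rte_flow_create() on "port_id"):

	struct rte_flow_error err;

	/* Destroy one flow: bnxt_flow_destroy() clears the EM or
	 * ntuple filter in HW and unlinks the flow from its VNIC.
	 */
	if (rte_flow_destroy(port_id, flow, &err))
		printf("destroy failed: %s\n",
		       err.message ? err.message : "(no message)");

	/* Or remove every flow on the port in one call; this lands
	 * in bnxt_flow_flush().
	 */
	if (rte_flow_flush(port_id, &err))
		printf("flush failed: %s\n",
		       err.message ? err.message : "(no message)");
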
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 613b2ee..76b8a18 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -40,8 +40,15 @@ struct bnxt;
 struct bnxt_filter_info {
 	STAILQ_ENTRY(bnxt_filter_info)	next;
 	uint64_t		fw_l2_filter_id;
+	uint64_t		fw_em_filter_id;
+	uint64_t		fw_ntuple_filter_id;
 #define INVALID_MAC_INDEX	((uint16_t)-1)
 	uint16_t		mac_index;
+#define HWRM_CFA_L2_FILTER     0
+#define HWRM_CFA_EM_FILTER     1
+#define HWRM_CFA_NTUPLE_FILTER 2
+	uint8_t                 filter_type;    /* L2, EM or NTUPLE filter */
+	uint32_t                dst_id;
 
 	/* Filter Characteristics */
 	uint32_t		flags;
@@ -65,6 +72,19 @@ struct bnxt_filter_info {
 	uint64_t		l2_filter_id_hint;
 	uint32_t		src_id;
 	uint8_t			src_type;
+	uint8_t                 src_macaddr[6];
+	uint8_t                 dst_macaddr[6];
+	uint32_t                dst_ipaddr[4];
+	uint32_t                dst_ipaddr_mask[4];
+	uint32_t                src_ipaddr[4];
+	uint32_t                src_ipaddr_mask[4];
+	uint16_t                dst_port;
+	uint16_t                dst_port_mask;
+	uint16_t                src_port;
+	uint16_t                src_port_mask;
+	uint16_t                ip_protocol;
+	uint16_t                ip_addr_type;
+	uint16_t                ethertype;
 };
 
 struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp);
@@ -74,4 +94,54 @@ void bnxt_free_all_filters(struct bnxt *bp);
 void bnxt_free_filter_mem(struct bnxt *bp);
 int bnxt_alloc_filter_mem(struct bnxt *bp);
 
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR	\
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR	\
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR	\
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR
+#define EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR	\
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE   \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE
+#define EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE       \
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE
+#define EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID       \
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR  \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK     \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR  \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK     \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT    \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK       \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT    \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK       \
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK
+#define NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO	\
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR	\
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR
+#define EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR	\
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_PORT	\
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT
+#define EM_FLOW_ALLOC_INPUT_EN_DST_PORT	\
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT
+#define EM_FLOW_ALLOC_INPUT_EN_IP_PROTO	\
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL
+#define EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6	\
+	HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6	\
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN	\
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN
+#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE	\
+	HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE
 #endif
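
The aliases above only shorten the autogenerated HWRM names so the
parser fits in 80 columns. An illustrative fragment of how they are
consumed ("tcp_spec"/"tcp_mask" as in the parser):

	/* An ntuple match on the destination port sets the field, its
	 * mask, and the matching enable bits for the HWRM request.
	 */
	filter->dst_port = tcp_spec->hdr.dst_port;
	filter->dst_port_mask = tcp_mask->hdr.dst_port;
	filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
			   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
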
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index ade9627..a05453d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -329,7 +329,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 	return rc;
 }
 
-int bnxt_hwrm_clear_filter(struct bnxt *bp,
+int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 			   struct bnxt_filter_info *filter)
 {
 	int rc = 0;
@@ -353,7 +353,7 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
 	return 0;
 }
 
-int bnxt_hwrm_set_filter(struct bnxt *bp,
+int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 			 uint16_t dst_id,
 			 struct bnxt_filter_info *filter)
 {
@@ -363,7 +363,7 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
 	uint32_t enables = 0;
 
 	if (filter->fw_l2_filter_id != UINT64_MAX)
-		bnxt_hwrm_clear_filter(bp, filter);
+		bnxt_hwrm_clear_l2_filter(bp, filter);
 
 	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
 
@@ -1017,6 +1017,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
 
 	HWRM_UNLOCK();
+	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
 	return rc;
 }
@@ -1133,7 +1134,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	int rc = 0;
 	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
-	uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+	uint32_t ctx_enable_flag = 0;
 	struct bnxt_plcmodes_cfg pmodes;
 
 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
@@ -1149,14 +1150,15 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
 	/* Only RSS support for now TBD: COS & LB */
 	req.enables =
-	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
-			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
+	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
 	if (vnic->lb_rule != 0xffff)
-		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
+		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
 	if (vnic->cos_rule != 0xffff)
-		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
-	if (vnic->rss_rule != 0xffff)
-		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
+	if (vnic->rss_rule != 0xffff) {
+		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
+		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+	}
 	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
@@ -1749,9 +1751,39 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	int rc = 0;
 
 	STAILQ_FOREACH(filter, &vnic->filter, next) {
-		rc = bnxt_hwrm_clear_filter(bp, filter);
-		if (rc)
-			break;
+		if (filter->filter_type == HWRM_CFA_EM_FILTER)
+			rc = bnxt_hwrm_clear_em_filter(bp, filter);
+		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+		else
+			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		/* Keep clearing the remaining filters even if one fails. */
+	}
+	return rc;
+}
+
+static int
+bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct bnxt_filter_info *filter;
+	struct rte_flow *flow;
+	int rc = 0;
+
+	while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
+		filter = flow->filter;
+		RTE_LOG(DEBUG, PMD, "filter type %d\n", filter->filter_type);
+		if (filter->filter_type == HWRM_CFA_EM_FILTER)
+			rc = bnxt_hwrm_clear_em_filter(bp, filter);
+		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+		else
+			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+
+		/* Remove from the head so the walk never touches a freed
+		 * element; keep clearing other flows even if one fails.
+		 */
+		STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
+		rte_free(flow);
 	}
 	return rc;
 }
@@ -1762,7 +1794,7 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	int rc = 0;
 
 	STAILQ_FOREACH(filter, &vnic->filter, next) {
-		rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
 		if (rc)
 			break;
 	}
@@ -1783,20 +1815,17 @@ void bnxt_free_tunnel_ports(struct bnxt *bp)
 
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
-	struct bnxt_vnic_info *vnic;
 	unsigned int i;
 
 	if (bp->vnic_info == NULL)
 		return;
 
-	vnic = &bp->vnic_info[0];
-	if (BNXT_PF(bp))
-		bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
-
 	/* VNIC resources */
 	for (i = 0; i < bp->nr_vnics; i++) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
+		bnxt_clear_hwrm_vnic_flows(bp, vnic);
+
 		bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
 		bnxt_hwrm_vnic_ctx_free(bp, vnic);
@@ -3126,3 +3155,217 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
 	rte_free(vnic_ids);
 	return -1;
 }
+
+int bnxt_hwrm_set_em_filter(struct bnxt *bp,
+			 uint16_t dst_id,
+			 struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
+	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	uint32_t enables = 0;
+
+	if (filter->fw_em_filter_id != UINT64_MAX)
+		bnxt_hwrm_clear_em_filter(bp, filter);
+
+	HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
+
+	req.flags = rte_cpu_to_le_32(filter->flags);
+
+	enables = filter->enables |
+	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
+	req.dst_id = rte_cpu_to_le_16(dst_id);
+
+	if (filter->ip_addr_type) {
+		req.ip_addr_type = filter->ip_addr_type;
+		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
+	}
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
+		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
+		memcpy(req.src_macaddr, filter->src_macaddr,
+		       ETHER_ADDR_LEN);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
+		memcpy(req.dst_macaddr, filter->dst_macaddr,
+		       ETHER_ADDR_LEN);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
+		req.ovlan_vid = filter->l2_ovlan;
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
+		req.ivlan_vid = filter->l2_ivlan;
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
+		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
+		req.ip_protocol = filter->ip_protocol;
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
+		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
+		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
+		req.src_port = rte_cpu_to_be_16(filter->src_port);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
+		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
+		req.mirror_vnic_id = filter->mirror_vnic_id;
+
+	req.enables = rte_cpu_to_le_32(enables);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
+	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (filter->fw_em_filter_id == UINT64_MAX)
+		return 0;
+
+	RTE_LOG(DEBUG, PMD, "Clear EM filter\n");
+	HWRM_PREP(req, CFA_EM_FLOW_FREE);
+
+	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	filter->fw_em_filter_id = -1;
+
+	return 0;
+}
+
+int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
+			 uint16_t dst_id,
+			 struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
+	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+						bp->hwrm_cmd_resp_addr;
+	uint32_t enables = 0;
+
+	if (filter->fw_ntuple_filter_id != UINT64_MAX) {
+		RTE_LOG(DEBUG, PMD, "NTUPLE filter id %" PRIx64 "\n",
+			filter->fw_ntuple_filter_id);
+		bnxt_hwrm_clear_ntuple_filter(bp, filter);
+	}
+
+	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
+
+	req.flags = rte_cpu_to_le_32(filter->flags);
+
+	enables = filter->enables |
+	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+	req.dst_id = rte_cpu_to_le_16(dst_id);
+
+	if (filter->ip_addr_type) {
+		req.ip_addr_type = filter->ip_addr_type;
+		enables |=
+			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
+	}
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
+		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
+		memcpy(req.src_macaddr, filter->src_macaddr,
+		       ETHER_ADDR_LEN);
+	/*
+	 * if (enables &
+	 *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
+	 *	memcpy(req.dst_macaddr, filter->dst_macaddr,
+	 *	       ETHER_ADDR_LEN);
+	 */
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
+		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
+		req.ip_protocol = filter->ip_protocol;
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
+		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
+		req.src_ipaddr_mask[0] =
+			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
+		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
+		req.dst_ipaddr_mask[0] =
+			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
+		req.src_port = rte_cpu_to_le_16(filter->src_port);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
+		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
+		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
+		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
+		req.mirror_vnic_id = filter->mirror_vnic_id;
+
+	req.enables = rte_cpu_to_le_32(enables);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
+				struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
+	struct hwrm_cfa_ntuple_filter_free_output *resp =
+						bp->hwrm_cmd_resp_addr;
+
+	if (filter->fw_ntuple_filter_id == UINT64_MAX)
+		return 0;
+
+	RTE_LOG(DEBUG, PMD, "Clear NTUPLE filter\n");
+	HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
+
+	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	filter->fw_ntuple_filter_id = -1;
+
+	return 0;
+}
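
A note on the sentinel convention used by the new HWRM helpers: filter
IDs are initialized to UINT64_MAX ("not allocated"), which makes the
clear routines idempotent. Illustrative driver-internal fragment
(assumes "bp" points to a live adapter):

	struct bnxt_filter_info f = {
		.fw_em_filter_id = UINT64_MAX,
		.fw_ntuple_filter_id = UINT64_MAX,
	};

	/* Both calls return 0 without sending an HWRM message since
	 * nothing is allocated; after a successful alloc the IDs hold
	 * the firmware handles, and the same calls free them and
	 * restore the sentinel.
	 */
	bnxt_hwrm_clear_em_filter(bp, &f);
	bnxt_hwrm_clear_ntuple_filter(bp, &f);
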
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 51cd0dd..bd9017f 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -51,9 +51,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 			uint16_t vlan_count,
 			struct bnxt_vlan_antispoof_table_entry *vlan_table);
-int bnxt_hwrm_clear_filter(struct bnxt *bp,
+int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 			   struct bnxt_filter_info *filter);
-int bnxt_hwrm_set_filter(struct bnxt *bp,
+int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 			 uint16_t dst_id,
 			 struct bnxt_filter_info *filter);
 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
@@ -156,4 +156,12 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
 					      bool on);
 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf);
+int bnxt_hwrm_set_em_filter(struct bnxt *bp, uint16_t dst_id,
+			struct bnxt_filter_info *filter);
+int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
+
+int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, uint16_t dst_id,
+			 struct bnxt_filter_info *filter);
+int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
+				struct bnxt_filter_info *filter);
 #endif
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index db9fb07..6f7c05b 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -83,6 +83,7 @@ void bnxt_init_vnics(struct bnxt *bp)
 
 		prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
 		STAILQ_INIT(&vnic->filter);
+		STAILQ_INIT(&vnic->flow_list);
 		STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
 	}
 	for (i = 0; i < MAX_FF_POOLS; i++)
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 993f221..5443904 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -80,6 +80,7 @@ struct bnxt_vnic_info {
 	bool		rss_dflt_cr;
 
 	STAILQ_HEAD(, bnxt_filter_info)	filter;
+	STAILQ_HEAD(, rte_flow)	flow_list;
 };
 
 struct bnxt;
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index 0bf5db5..82b9bac 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -730,7 +730,7 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
 		    (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
 		     HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&
 		    memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {
-			bnxt_hwrm_clear_filter(bp, filter);
+			bnxt_hwrm_clear_l2_filter(bp, filter);
 			break;
 		}
 	}
@@ -748,7 +748,7 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
 	/* Do not add a filter for the default MAC */
 	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||
 	    memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))
-		rc = bnxt_hwrm_set_filter(bp, vnic.fw_vnic_id, filter);
+		rc = bnxt_hwrm_set_l2_filter(bp, vnic.fw_vnic_id, filter);
 
 exit:
 	return rc;
-- 
2.10.1 (Apple Git-78)


