[v2,5/7] net/hinic: Add Fdir filter type

Message ID 153567d68c30f2af33568a1c159b6fb0fc93fd77.1584456756.git.cloud.wangxiaoyun@huawei.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: Fix LRO issue and support Flow Control

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Wangxiaoyun (Cloud) March 17, 2020, 3:01 p.m. UTC
  This patch supports the filter types of inner VXLAN and non-VXLAN dport,
and uses the TCAM method to configure these rules.

Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
 drivers/net/hinic/base/hinic_pmd_cmd.h    |   6 +
 drivers/net/hinic/base/hinic_pmd_niccfg.c | 184 +++++-
 drivers/net/hinic/base/hinic_pmd_niccfg.h |  81 +++
 drivers/net/hinic/hinic_pmd_ethdev.c      |   6 +-
 drivers/net/hinic/hinic_pmd_ethdev.h      | 110 +++-
 drivers/net/hinic/hinic_pmd_flow.c        | 961 ++++++++++++++++++++++++++----
 6 files changed, 1199 insertions(+), 149 deletions(-)
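
For reference, the new filter types are reached through the generic rte_flow API; the patch adds no new user-facing interface. The sketch below is a minimal, illustrative example (not part of the patch) of the kind of inner-VXLAN dport rule the new tunnel TCAM parse path is meant to accept; the port id, IP address, inner port number and queue index are arbitrary example values, and a configured and started port is assumed.

#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

/* Steer tunneled packets with inner TCP dst port 80 to RX queue 3. */
static struct rte_flow *
add_inner_vxlan_dport_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1)) },
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = { .dst_addr = RTE_BE32(0xffffffff) },
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(80) },
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* outer UDP */
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,	/* inner TCP dport */
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

For the non-tunnel ("non-VXLAN dport") case the same call shape applies without the outer UDP/VXLAN items, e.g. ETH / IPV4 / TCP or UDP with only a port spec and mask / END, which the new TCAM-normal parse path below is meant to pick up.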
  

Patch

diff --git a/drivers/net/hinic/base/hinic_pmd_cmd.h b/drivers/net/hinic/base/hinic_pmd_cmd.h
index c025851..09918a7 100644
--- a/drivers/net/hinic/base/hinic_pmd_cmd.h
+++ b/drivers/net/hinic/base/hinic_pmd_cmd.h
@@ -115,6 +115,12 @@  enum hinic_port_cmd {
 
 	HINIC_PORT_CMD_GET_PORT_INFO		= 0xaa,
 
+	HINIC_PORT_CMD_UP_TC_ADD_FLOW		= 0xaf,
+	HINIC_PORT_CMD_UP_TC_DEL_FLOW		= 0xb0,
+	HINIC_PORT_CMD_UP_TC_GET_FLOW		= 0xb1,
+	HINIC_PORT_CMD_UP_TC_FLUSH_TCAM		= 0xb2,
+	HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK	= 0xb3,
+
 	HINIC_PORT_CMD_SET_IPSU_MAC		= 0xcb,
 	HINIC_PORT_CMD_GET_IPSU_MAC		= 0xcc,
 
diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c
index 2dc431e..9c273ad 100644
--- a/drivers/net/hinic/base/hinic_pmd_niccfg.c
+++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c
@@ -19,22 +19,6 @@ 
 			buf_out, out_size, 0)
 
 
-#define TCAM_SET	0x1
-#define TCAM_CLEAR	0x2
-
-struct hinic_port_qfilter_info {
-	struct hinic_mgmt_msg_head mgmt_msg_head;
-
-	u16 func_id;
-	u8 normal_type_enable;
-	u8 filter_type_enable;
-	u8 filter_enable;
-	u8 filter_type;
-	u8 qid;
-	u8 fdir_flag;
-	u32 key;
-};
-
 /**
  * hinic_init_function_table - Initialize function table.
  *
@@ -1901,7 +1885,7 @@  int hinic_set_fdir_tcam(void *hwdev, u16 type_mask,
 			&port_tcam_cmd, sizeof(port_tcam_cmd),
 			&port_tcam_cmd, &out_size);
 	if (err || !out_size || port_tcam_cmd.mgmt_msg_head.status) {
-		PMD_DRV_LOG(ERR, "Set tcam table failed, err: %d, status: 0x%x, out size: 0x%x\n",
+		PMD_DRV_LOG(ERR, "Set tcam table failed, err: %d, status: 0x%x, out size: 0x%x",
 			err, port_tcam_cmd.mgmt_msg_head.status, out_size);
 		return -EFAULT;
 	}
@@ -1938,10 +1922,174 @@  int hinic_clear_fdir_tcam(void *hwdev, u16 type_mask)
 			&port_tcam_cmd, sizeof(port_tcam_cmd),
 			&port_tcam_cmd, &out_size);
 	if (err || !out_size || port_tcam_cmd.mgmt_msg_head.status) {
-		PMD_DRV_LOG(ERR, "Clear tcam table failed, err: %d, status: 0x%x, out size: 0x%x\n",
+		PMD_DRV_LOG(ERR, "Clear tcam table failed, err: %d, status: 0x%x, out size: 0x%x",
 			err, port_tcam_cmd.mgmt_msg_head.status, out_size);
 		return -EFAULT;
 	}
 
 	return 0;
 }
+
+int hinic_add_tcam_rule(void *hwdev, struct tag_tcam_cfg_rule *tcam_rule)
+{
+	u16 out_size = sizeof(struct tag_fdir_add_rule_cmd);
+	struct tag_fdir_add_rule_cmd tcam_cmd;
+	int err;
+
+	if (!hwdev) {
+		PMD_DRV_LOG(ERR, "Hwdev is NULL");
+		return -EINVAL;
+	}
+
+	if (tcam_rule->index >= HINIC_MAX_TCAM_RULES_NUM) {
+		PMD_DRV_LOG(ERR, "Tcam rules num to add is invalid");
+		return -EFAULT;
+	}
+
+	memset(&tcam_cmd, 0, sizeof(struct tag_fdir_add_rule_cmd));
+	tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	memcpy((void *)&tcam_cmd.rule, (void *)tcam_rule,
+		sizeof(struct tag_tcam_cfg_rule));
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_ADD_FLOW,
+				&tcam_cmd, sizeof(tcam_cmd),
+				&tcam_cmd, &out_size);
+	if (err || tcam_cmd.mgmt_msg_head.status || !out_size) {
+		PMD_DRV_LOG(ERR,
+			"Add tcam rule failed, err: %d, status: 0x%x, out size: 0x%x",
+			err, tcam_cmd.mgmt_msg_head.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int hinic_del_tcam_rule(void *hwdev, u32 index)
+{
+	u16 out_size = sizeof(struct tag_fdir_del_rule_cmd);
+	struct tag_fdir_del_rule_cmd tcam_cmd;
+	int err;
+
+	if (!hwdev) {
+		PMD_DRV_LOG(ERR, "Hwdev is NULL");
+		return -EINVAL;
+	}
+
+	if (index >= HINIC_MAX_TCAM_RULES_NUM) {
+		PMD_DRV_LOG(ERR, "Tcam rules num to del is invalid");
+		return -EFAULT;
+	}
+
+	memset(&tcam_cmd, 0, sizeof(struct tag_fdir_del_rule_cmd));
+	tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	tcam_cmd.index_start = index;
+	tcam_cmd.index_num = 1;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_DEL_FLOW,
+				&tcam_cmd, sizeof(tcam_cmd),
+				&tcam_cmd, &out_size);
+	if (err || tcam_cmd.mgmt_msg_head.status || !out_size) {
+		PMD_DRV_LOG(ERR,
+			"Del tcam rule failed, err: %d, status: 0x%x, out size: 0x%x",
+			err, tcam_cmd.mgmt_msg_head.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int hinic_mgmt_tcam_block(void *hwdev, u8 alloc_en,
+				u8 block_type, u16 *index)
+{
+	struct hinic_cmd_ctrl_tcam_block tcam_block_info;
+	u16 out_size = sizeof(struct hinic_cmd_ctrl_tcam_block);
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	int err;
+
+	if (!hwdev) {
+		PMD_DRV_LOG(ERR, "Hwdev is NULL");
+		return -EINVAL;
+	}
+
+	memset(&tcam_block_info, 0, sizeof(struct hinic_cmd_ctrl_tcam_block));
+	tcam_block_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	tcam_block_info.func_id = hinic_global_func_id(hwdev);
+	tcam_block_info.alloc_en = alloc_en;
+	tcam_block_info.tcam_type = block_type;
+	tcam_block_info.tcam_block_index = *index;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev,
+				HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK,
+				&tcam_block_info, sizeof(tcam_block_info),
+				&tcam_block_info, &out_size);
+	if (tcam_block_info.mgmt_msg_head.status ==
+		HINIC_MGMT_CMD_UNSUPPORTED) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+		PMD_DRV_LOG(INFO, "Firmware/uP doesn't support alloc or del tcam block");
+		return err;
+	} else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+			(HINIC_IS_VF(nic_hwdev))) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+		PMD_DRV_LOG(INFO, "VF doesn't support alloc and del tcam block.");
+		return err;
+	} else if (err || (!out_size) || tcam_block_info.mgmt_msg_head.status) {
+		PMD_DRV_LOG(ERR,
+			"Set tcam block failed, err: %d, status: 0x%x, out size: 0x%x",
+			err, tcam_block_info.mgmt_msg_head.status, out_size);
+		return -EFAULT;
+	}
+
+	if (alloc_en)
+		*index = tcam_block_info.tcam_block_index;
+
+	return 0;
+}
+
+int hinic_alloc_tcam_block(void *hwdev, u8 block_type, u16 *index)
+{
+	return hinic_mgmt_tcam_block(hwdev, HINIC_TCAM_BLOCK_ENABLE,
+				block_type, index);
+}
+
+int hinic_free_tcam_block(void *hwdev, u8 block_type, u16 *index)
+{
+	return hinic_mgmt_tcam_block(hwdev, HINIC_TCAM_BLOCK_DISABLE,
+				block_type, index);
+}
+
+int hinic_flush_tcam_rule(void *hwdev)
+{
+	struct hinic_cmd_flush_tcam_rules tcam_flush;
+	u16 out_size = sizeof(struct hinic_cmd_flush_tcam_rules);
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	int err;
+
+	if (!hwdev) {
+		PMD_DRV_LOG(ERR, "Hwdev is NULL");
+		return -EINVAL;
+	}
+
+	memset(&tcam_flush, 0, sizeof(struct hinic_cmd_flush_tcam_rules));
+	tcam_flush.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	tcam_flush.func_id = hinic_global_func_id(hwdev);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_FLUSH_TCAM,
+			&tcam_flush, sizeof(struct hinic_cmd_flush_tcam_rules),
+			&tcam_flush, &out_size);
+	if (tcam_flush.mgmt_msg_head.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+		PMD_DRV_LOG(INFO, "Firmware/uP doesn't support flush tcam fdir");
+	} else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+			(HINIC_IS_VF(nic_hwdev))) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+		PMD_DRV_LOG(INFO, "VF doesn't support flush tcam fdir");
+	} else if (err || (!out_size) || tcam_flush.mgmt_msg_head.status) {
+		PMD_DRV_LOG(ERR,
+			"Flush tcam fdir rules failed, err: %d, status: 0x%x, out size: 0x%x",
+			err, tcam_flush.mgmt_msg_head.status, out_size);
+		err = -EFAULT;
+	}
+
+	return err;
+}
+
diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h
index b9e037e..be6b320 100644
--- a/drivers/net/hinic/base/hinic_pmd_niccfg.h
+++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h
@@ -750,6 +750,77 @@  struct hinic_fdir_tcam_info {
 	struct tag_pa_action filter_action;
 };
 
+#define TCAM_SET	0x1
+#define TCAM_CLEAR	0x2
+
+struct hinic_port_qfilter_info {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16 func_id;
+	u8 normal_type_enable;
+	u8 filter_type_enable;
+	u8 filter_enable;
+	u8 filter_type;
+	u8 qid;
+	u8 fdir_flag;
+	u32 key;
+};
+
+#define HINIC_MAX_TCAM_RULES_NUM   (10240)
+#define HINIC_TCAM_BLOCK_ENABLE      1
+#define HINIC_TCAM_BLOCK_DISABLE     0
+
+struct tag_tcam_result {
+	u32 qid;
+	u32 rsvd;
+};
+
+#define TCAM_FLOW_KEY_SIZE   24
+
+struct tag_tcam_key_x_y {
+	u8 x[TCAM_FLOW_KEY_SIZE];
+	u8 y[TCAM_FLOW_KEY_SIZE];
+};
+
+struct tag_tcam_cfg_rule {
+	u32 index;
+	struct tag_tcam_result data;
+	struct tag_tcam_key_x_y key;
+};
+
+struct tag_fdir_add_rule_cmd {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+	struct tag_tcam_cfg_rule rule;
+};
+
+struct tag_fdir_del_rule_cmd {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u32 index_start;
+	u32 index_num;
+};
+
+struct hinic_cmd_flush_tcam_rules {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16 func_id;
+	u16 rsvd;
+};
+
+struct hinic_cmd_ctrl_tcam_block {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16 func_id;
+	u8  alloc_en; /* 0: free tcam block, 1: alloc tcam block */
+	/*
+	 * 0: alloc 1k size tcam block,
+	 * 1: alloc 128 size tcam block, others rsvd
+	 */
+	u8  tcam_type;
+	u16 tcam_block_index;
+	u16 rsvd;
+};
+
 int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id);
 
 int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id);
@@ -858,4 +929,14 @@  int hinic_set_fdir_tcam(void *hwdev, u16 type_mask,
 
 int hinic_clear_fdir_tcam(void *hwdev, u16 type_mask);
 
+int hinic_add_tcam_rule(void *hwdev, struct tag_tcam_cfg_rule *tcam_rule);
+
+int hinic_del_tcam_rule(void *hwdev, u32 index);
+
+int hinic_alloc_tcam_block(void *hwdev, u8 block_type, u16 *index);
+
+int hinic_free_tcam_block(void *hwdev, u8 block_type, u16 *index);
+
+int hinic_flush_tcam_rule(void *hwdev);
+
 #endif /* _HINIC_PMD_NICCFG_H_ */
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index 37aa042..9e90056 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -1222,7 +1222,7 @@  static void hinic_dev_stop(struct rte_eth_dev *dev)
 	/* clean root context */
 	hinic_free_qp_ctxts(nic_dev->hwdev);
 
-	hinic_free_fdir_filter(nic_dev);
+	hinic_destroy_fdir_filter(dev);
 
 	/* free mbuf */
 	hinic_free_all_rx_mbuf(dev);
@@ -2946,6 +2946,7 @@  static int hinic_func_init(struct rte_eth_dev *eth_dev)
 	struct rte_ether_addr *eth_addr;
 	struct hinic_nic_dev *nic_dev;
 	struct hinic_filter_info *filter_info;
+	struct hinic_tcam_info *tcam_info;
 	u32 mac_size;
 	int rc;
 
@@ -3035,9 +3036,12 @@  static int hinic_func_init(struct rte_eth_dev *eth_dev)
 
 	/* initialize filter info */
 	filter_info = &nic_dev->filter;
+	tcam_info = &nic_dev->tcam;
 	memset(filter_info, 0, sizeof(struct hinic_filter_info));
+	memset(tcam_info, 0, sizeof(struct hinic_tcam_info));
 	/* initialize 5tuple filter list */
 	TAILQ_INIT(&filter_info->fivetuple_list);
+	TAILQ_INIT(&tcam_info->tcam_list);
 	TAILQ_INIT(&nic_dev->filter_ntuple_list);
 	TAILQ_INIT(&nic_dev->filter_ethertype_list);
 	TAILQ_INIT(&nic_dev->filter_fdir_rule_list);
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h
index 3e3f3b3..910a57b 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.h
+++ b/drivers/net/hinic/hinic_pmd_ethdev.h
@@ -95,20 +95,113 @@  struct hinic_hw_fdir_mask {
 	uint32_t dst_ipv4_mask;
 	uint16_t src_port_mask;
 	uint16_t dst_port_mask;
+	uint16_t proto_mask;
+	uint16_t tunnel_flag;
+	uint16_t tunnel_inner_src_port_mask;
+	uint16_t tunnel_inner_dst_port_mask;
 };
 
 /* Flow Director attribute */
 struct hinic_atr_input {
-	u32 dst_ip;
-	u32 src_ip;
-	u16 src_port;
-	u16 dst_port;
+	uint32_t dst_ip;
+	uint32_t src_ip;
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint16_t proto;
+	uint16_t tunnel_flag;
+	uint16_t tunnel_inner_src_port;
+	uint16_t tunnel_inner_dst_port;
+};
+
+enum hinic_fdir_mode {
+	HINIC_FDIR_MODE_NORMAL      = 0,
+	HINIC_FDIR_MODE_TCAM        = 1,
+};
+
+#define HINIC_PF_MAX_TCAM_FILTERS	1024
+#define HINIC_VF_MAX_TCAM_FILTERS	128
+#define HINIC_SUPPORT_PF_MAX_NUM	4
+#define HINIC_TOTAL_PF_MAX_NUM		16
+#define HINIC_SUPPORT_VF_MAX_NUM	32
+#define HINIC_TCAM_BLOCK_TYPE_PF	0 /* 1024 tcam index of a block */
+#define HINIC_TCAM_BLOCK_TYPE_VF	1 /* 128 tcam index of a block */
+
+#define HINIC_PKT_VF_TCAM_INDEX_START(block_index)  \
+		(HINIC_PF_MAX_TCAM_FILTERS * HINIC_SUPPORT_PF_MAX_NUM + \
+		HINIC_VF_MAX_TCAM_FILTERS * (block_index))
+
+TAILQ_HEAD(hinic_tcam_filter_list, hinic_tcam_filter);
+
+struct hinic_tcam_info {
+	struct hinic_tcam_filter_list tcam_list;
+	u8 tcam_index_array[HINIC_PF_MAX_TCAM_FILTERS];
+	u16 tcam_block_index;
+	u16 tcam_rule_nums;
+};
+
+struct tag_tcam_key_mem {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN)
+
+		u32 rsvd0:16;
+		u32 function_id:16;
+
+		u32 protocal:8;
+		/*
+		 * tunnel packet, mask must be 0xff, spec value is 1;
+		 * normal packet, mask must be 0, spec value is 0;
+		 * if tunnel packet, ucode uses
+		 * sip/dip/protocol/src_port/dst_port from inner packet
+		 */
+		u32 tunnel_flag:8;
+		u32 sip_h:16;
+
+		u32 sip_l:16;
+		u32 dip_h:16;
+
+		u32 dip_l:16;
+		u32 src_port:16;
+
+		u32 dst_port:16;
+		/*
+		 * tunnel packet and normal packet,
+		 * ext_dip mask must be 0xffffffff
+		 */
+		u32 ext_dip_h:16;
+		u32 ext_dip_l:16;
+		u32 rsvd2:16;
+#else
+		u32 function_id:16;
+		u32 rsvd0:16;
+
+		u32 sip_h:16;
+		u32 tunnel_flag:8;
+		u32 protocal:8;
+
+		u32 dip_h:16;
+		u32 sip_l:16;
+
+		u32 src_port:16;
+		u32 dip_l:16;
+
+		u32 ext_dip_h:16;
+		u32 dst_port:16;
+
+		u32 rsvd2:16;
+		u32 ext_dip_l:16;
+#endif
+};
+
+struct tag_tcam_key {
+	struct tag_tcam_key_mem key_info;
+	struct tag_tcam_key_mem key_mask;
 };
 
 struct hinic_fdir_rule {
 	struct hinic_hw_fdir_mask mask;
 	struct hinic_atr_input hinic_fdir; /* key of fdir filter */
 	uint8_t queue; /* queue assigned when matched */
+	enum hinic_fdir_mode mode; /* fdir type */
+	u16 tcam_index;
 };
 
 /* ntuple filter list structure */
@@ -129,6 +222,13 @@  struct hinic_fdir_rule_ele {
 	struct hinic_fdir_rule filter_info;
 };
 
+struct hinic_tcam_filter {
+	TAILQ_ENTRY(hinic_tcam_filter) entries;
+	uint16_t index; /* tcam index */
+	struct tag_tcam_key tcam_key;
+	uint16_t queue; /* rx queue assigned to */
+};
+
 struct rte_flow {
 	enum rte_filter_type filter_type;
 	void *rule;
@@ -181,6 +281,7 @@  struct hinic_nic_dev {
 	u32 rx_csum_en;
 
 	struct hinic_filter_info    filter;
+	struct hinic_tcam_info      tcam;
 	struct hinic_ntuple_filter_list filter_ntuple_list;
 	struct hinic_ethertype_filter_list filter_ethertype_list;
 	struct hinic_fdir_rule_filter_list filter_fdir_rule_list;
@@ -189,4 +290,5 @@  struct hinic_nic_dev {
 
 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev);
 
+void hinic_destroy_fdir_filter(struct rte_eth_dev *dev);
 #endif /* _HINIC_PMD_ETHDEV_H_ */
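
As a reading aid for the TCAM index macros added above: the first HINIC_SUPPORT_PF_MAX_NUM * HINIC_PF_MAX_TCAM_FILTERS entries are reserved for PF blocks, and VF blocks of HINIC_VF_MAX_TCAM_FILTERS entries follow them, which is what HINIC_PKT_VF_TCAM_INDEX_START() expresses. The stand-alone snippet below only reproduces that arithmetic with hypothetical block and rule numbers; it is not driver code, but it mirrors how hinic_add_tcam_filter() in hinic_pmd_flow.c below computes the hardware rule index for a VF (PF rules simply use tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index).

#include <stdio.h>

#define HINIC_PF_MAX_TCAM_FILTERS	1024
#define HINIC_VF_MAX_TCAM_FILTERS	128
#define HINIC_SUPPORT_PF_MAX_NUM	4

#define HINIC_PKT_VF_TCAM_INDEX_START(block_index)  \
		(HINIC_PF_MAX_TCAM_FILTERS * HINIC_SUPPORT_PF_MAX_NUM + \
		HINIC_VF_MAX_TCAM_FILTERS * (block_index))

int main(void)
{
	/* hypothetical example: third VF block, fifth rule inside it */
	unsigned int vf_block = 2, rule_in_block = 4;
	unsigned int hw_index =
		HINIC_PKT_VF_TCAM_INDEX_START(vf_block) + rule_in_block;

	/* 4 * 1024 + 2 * 128 + 4 = 4356 */
	printf("VF tcam hw index = %u\n", hw_index);
	return 0;
}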
diff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c
index 49c9d87..7f8f330 100644
--- a/drivers/net/hinic/hinic_pmd_flow.c
+++ b/drivers/net/hinic/hinic_pmd_flow.c
@@ -46,7 +46,12 @@ 
 #define PA_IP_PROTOCOL_TYPE_SCTP	5
 #define PA_IP_PROTOCOL_TYPE_VRRP	112
 
-#define IP_HEADER_PROTOCOL_TYPE_TCP	6
+#define IP_HEADER_PROTOCOL_TYPE_TCP     6
+#define IP_HEADER_PROTOCOL_TYPE_UDP     17
+#define IP_HEADER_PROTOCOL_TYPE_ICMP    1
+
+#define FDIR_TCAM_NORMAL_PACKET         0
+#define FDIR_TCAM_TUNNEL_PACKET         1
 
 #define HINIC_MIN_N_TUPLE_PRIO		1
 #define HINIC_MAX_N_TUPLE_PRIO		7
@@ -82,6 +87,10 @@ 
 #define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
 	(&((struct hinic_nic_dev *)nic_dev)->filter)
 
+#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
+	(&((struct hinic_nic_dev *)nic_dev)->tcam)
+
+
 enum hinic_atr_flow_type {
 	HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
 	HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
@@ -270,8 +279,7 @@  static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
  */
-static int
-cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 			const struct rte_flow_item *pattern,
 			const struct rte_flow_action *actions,
 			struct rte_eth_ethertype_filter *filter,
@@ -341,8 +349,7 @@  static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
 	return 0;
 }
 
-static int
-hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
+static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
@@ -728,8 +735,7 @@  static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
  * Because the pattern is used to describe the packets,
  * normally the packets should use network order.
  */
-static int
-cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
 			struct rte_eth_ntuple_filter *filter,
@@ -752,8 +758,7 @@  static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
 	return 0;
 }
 
-static int
-hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
+static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
@@ -891,6 +896,7 @@  static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
 
 		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
 		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+		rule->mode = HINIC_FDIR_MODE_NORMAL;
 
 		if (item->spec) {
 			ipv4_spec =
@@ -906,6 +912,8 @@  static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
 		item = next_no_void_pattern(pattern, item);
 		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_ANY &&
 		    item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct hinic_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -920,6 +928,239 @@  static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
 }
 
 static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
+			__rte_unused const struct rte_flow_item pattern[],
+			__rte_unused struct hinic_fdir_rule *rule,
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = *in_out_item;
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by normal fdir filter,not support l4");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+
+static int hinic_normal_item_check_end(const struct rte_flow_item *item,
+					struct hinic_fdir_rule *rule,
+					struct rte_flow_error *error)
+{
+	/* Check if the next not void item is END */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(rule, 0, sizeof(struct hinic_fdir_rule));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by fdir filter,support end");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
+					const struct rte_flow_item pattern[],
+					struct hinic_fdir_rule *rule,
+					struct rte_flow_error *error)
+{
+	if (hinic_normal_item_check_ether(&item, pattern, error) ||
+		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
+		hinic_normal_item_check_l4(&item, pattern, rule, error) ||
+		hinic_normal_item_check_end(item, rule, error))
+		return -rte_errno;
+
+	return 0;
+}
+
+static int
+hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
+				const struct rte_flow_item pattern[],
+				struct hinic_fdir_rule *rule,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = *in_out_item;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
+		rule->mode = HINIC_FDIR_MODE_TCAM;
+		rule->mask.proto_mask = UINT16_MAX;
+		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
+		rule->mode = HINIC_FDIR_MODE_TCAM;
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		if (!item->mask) {
+			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter, support src, dst ports");
+			return -rte_errno;
+		}
+
+		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+		/*
+		 * Only support src & dst ports, tcp flags,
+		 * others should be masked.
+		 */
+		if (tcp_mask->hdr.sent_seq ||
+			tcp_mask->hdr.recv_ack ||
+			tcp_mask->hdr.data_off ||
+			tcp_mask->hdr.rx_win ||
+			tcp_mask->hdr.cksum ||
+			tcp_mask->hdr.tcp_urp) {
+			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir normal tcam filter");
+			return -rte_errno;
+		}
+
+		rule->mode = HINIC_FDIR_MODE_TCAM;
+		rule->mask.proto_mask = UINT16_MAX;
+		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+
+		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
+		if (item->spec) {
+			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+			rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
+			rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
+		}
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		/*
+		 * Only care about src & dst ports,
+		 * others should be masked.
+		 */
+		if (!item->mask) {
+			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter, support src, dst ports");
+			return -rte_errno;
+		}
+
+		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+		if (udp_mask->hdr.dgram_len ||
+			udp_mask->hdr.dgram_cksum) {
+			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter, support udp");
+			return -rte_errno;
+		}
+
+		rule->mode = HINIC_FDIR_MODE_TCAM;
+		rule->mask.proto_mask = UINT16_MAX;
+		rule->mask.src_port_mask = udp_mask->hdr.src_port;
+		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
+		if (item->spec) {
+			udp_spec = (const struct rte_flow_item_udp *)item->spec;
+			rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
+			rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
+		}
+	} else {
+		(void)memset(rule,  0, sizeof(struct hinic_fdir_rule));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter tcam normal, l4 only support icmp, tcp");
+		return -rte_errno;
+	}
+
+	item = next_no_void_pattern(pattern, item);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by fdir filter tcam normal, support end");
+		return -rte_errno;
+	}
+
+	/* get next no void item */
+	*in_out_item = item;
+
+	return 0;
+}
+
+static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
+					const struct rte_flow_item pattern[],
+					struct hinic_fdir_rule *rule,
+					struct rte_flow_error *error)
+{
+	if (hinic_normal_item_check_ether(&item, pattern, error) ||
+		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
+		hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
+		hinic_normal_item_check_end(item, rule, error))
+		return -rte_errno;
+
+	return 0;
+}
+
+static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
+					const struct rte_flow_item pattern[],
+					struct hinic_fdir_rule *rule,
+					struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = *in_out_item;
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter, support vxlan");
+			return -rte_errno;
+		}
+
+		*in_out_item = item;
+	} else {
+		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
+				const struct rte_flow_item pattern[],
+				struct hinic_fdir_rule *rule,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = *in_out_item;
+
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_ANY) {
+			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter, support tcp/udp");
+			return -rte_errno;
+		}
+
+		*in_out_item = item;
+	}
+
+	return 0;
+}
+
+static int
+hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
 				const struct rte_flow_item pattern[],
 				struct hinic_fdir_rule *rule,
 				struct rte_flow_error *error)
@@ -933,13 +1174,14 @@  static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 		/* Not supported last point for range */
 		if (item->last) {
+			memset(rule, 0, sizeof(struct hinic_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				item, "Not supported last point for range");
 			return -rte_errno;
 		}
 
-		/* Get TCP/UDP info */
+		/* get the TCP/UDP info */
 		if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
 			/*
 			 * Only care about src & dst ports,
@@ -948,8 +1190,8 @@  static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
 			if (!item->mask) {
 				memset(rule, 0, sizeof(struct hinic_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
-					RTE_FLOW_ERROR_TYPE_ITEM, item,
-					"Not supported by fdir filter,support src,dst ports");
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter, support src, dst ports");
 				return -rte_errno;
 			}
 
@@ -961,26 +1203,31 @@  static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
 				tcp_mask->hdr.rx_win ||
 				tcp_mask->hdr.cksum ||
 				tcp_mask->hdr.tcp_urp) {
-				memset(rule, 0, sizeof(struct hinic_fdir_rule));
+				(void)memset(rule, 0,
+					sizeof(struct hinic_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
-					item, "Not supported by fdir filter,support tcp");
+					item, "Not supported by fdir filter, support tcp");
 				return -rte_errno;
 			}
 
-			rule->mask.src_port_mask = tcp_mask->hdr.src_port;
-			rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+			rule->mode = HINIC_FDIR_MODE_TCAM;
+			rule->mask.tunnel_flag = UINT16_MAX;
+			rule->mask.tunnel_inner_src_port_mask =
+							tcp_mask->hdr.src_port;
+			rule->mask.tunnel_inner_dst_port_mask =
+							tcp_mask->hdr.dst_port;
+			rule->mask.proto_mask = UINT16_MAX;
 
+			rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
 			if (item->spec) {
 				tcp_spec =
-					(const struct rte_flow_item_tcp *)
-					item->spec;
-				rule->hinic_fdir.src_port =
-					tcp_spec->hdr.src_port;
-				rule->hinic_fdir.dst_port =
-					tcp_spec->hdr.dst_port;
+				(const struct rte_flow_item_tcp *)item->spec;
+				rule->hinic_fdir.tunnel_inner_src_port =
+							tcp_spec->hdr.src_port;
+				rule->hinic_fdir.tunnel_inner_dst_port =
+							tcp_spec->hdr.dst_port;
 			}
-
 		} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
 			/*
 			 * Only care about src & dst ports,
@@ -990,7 +1237,7 @@  static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
 				memset(rule, 0, sizeof(struct hinic_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
-					item, "Not supported by fdir filter,support src,dst ports");
+					item, "Not supported by fdir filter, support src, dst ports");
 				return -rte_errno;
 			}
 
@@ -1000,60 +1247,55 @@  static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
 				memset(rule, 0, sizeof(struct hinic_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
-					item, "Not supported by fdir filter,support udp");
+					item, "Not supported by fdir filter, support udp");
 				return -rte_errno;
 			}
-			rule->mask.src_port_mask = udp_mask->hdr.src_port;
-			rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
 
+			rule->mode = HINIC_FDIR_MODE_TCAM;
+			rule->mask.tunnel_flag = UINT16_MAX;
+			rule->mask.tunnel_inner_src_port_mask =
+							udp_mask->hdr.src_port;
+			rule->mask.tunnel_inner_dst_port_mask =
+							udp_mask->hdr.dst_port;
+			rule->mask.proto_mask = UINT16_MAX;
+
+			rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
 			if (item->spec) {
 				udp_spec =
-					(const struct rte_flow_item_udp *)
-					item->spec;
-				rule->hinic_fdir.src_port =
-					udp_spec->hdr.src_port;
-				rule->hinic_fdir.dst_port =
-					udp_spec->hdr.dst_port;
+				(const struct rte_flow_item_udp *)item->spec;
+				rule->hinic_fdir.tunnel_inner_src_port =
+							udp_spec->hdr.src_port;
+				rule->hinic_fdir.tunnel_inner_dst_port =
+							udp_spec->hdr.dst_port;
 			}
+		} else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
+			rule->mode = HINIC_FDIR_MODE_TCAM;
+			rule->mask.tunnel_flag = UINT16_MAX;
 		} else {
 			memset(rule, 0, sizeof(struct hinic_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
-				item, "Not supported by fdir filter,support tcp/udp");
+				item, "Not supported by fdir filter, support tcp/udp");
 			return -rte_errno;
 		}
 
-		/* Get next no void item */
+		/* get next no void item */
 		*in_out_item = next_no_void_pattern(pattern, item);
 	}
 
 	return 0;
 }
 
-static int hinic_normal_item_check_end(const struct rte_flow_item *item,
-					struct hinic_fdir_rule *rule,
-					struct rte_flow_error *error)
-{
-	/* Check if the next not void item is END */
-	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
-		memset(rule, 0, sizeof(struct hinic_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter,support end");
-		return -rte_errno;
-	}
-
-	return 0;
-}
-
-static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
+static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
 					const struct rte_flow_item pattern[],
 					struct hinic_fdir_rule *rule,
 					struct rte_flow_error *error)
 {
 	if (hinic_normal_item_check_ether(&item, pattern, error) ||
 		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
-		hinic_normal_item_check_l4(&item, pattern, rule, error) ||
+		hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
+		hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
+		hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
 		hinic_normal_item_check_end(item, rule, error))
 		return -rte_errno;
 
@@ -1172,8 +1414,107 @@  static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
 	return 0;
 }
 
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
+ * And get the flow director filter info BTW.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item can be ANY/ICMP/TCP/UDP
+ * ACTION:
+ * The first not void action should be QUEUE.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP pattern example:
+ * ITEM                 Spec	                       Mask
+ * ETH            NULL                                 NULL
+ * IPV4           src_addr  1.2.3.6                 0xFFFFFFFF
+ *                dst_addr  1.2.3.5                 0xFFFFFFFF
+ * UDP/TCP        src_port  80                      0xFFFF
+ *                dst_port  80                      0xFFFF
+ * END
+ * Other members in mask and spec should set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
+			       const struct rte_flow_item pattern[],
+			       const struct rte_flow_action actions[],
+			       struct hinic_fdir_rule *rule,
+			       struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = NULL;
+
+	if (hinic_check_filter_arg(attr, pattern, actions, error))
+		return -rte_errno;
+
+	if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
+		return -rte_errno;
+
+	if (hinic_check_normal_attr_ele(attr, rule, error))
+		return -rte_errno;
+
+	if (hinic_check_normal_act_ele(item, actions, rule, error))
+		return -rte_errno;
+
+	return 0;
+}
+
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
+ * And get the flow director filter info BTW.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item must be UDP
+ * The next not void item must be VXLAN(optional)
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The next not void item could be ANY or UDP or TCP(optional)
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP pattern example:
+ * ITEM             Spec	                    Mask
+ * ETH            NULL                              NULL
+ * IPV4        src_addr  1.2.3.6                 0xFFFFFFFF
+ *             dst_addr  1.2.3.5                 0xFFFFFFFF
+ * UDP            NULL                              NULL
+ * VXLAN          NULL                              NULL
+ * UDP/TCP     src_port  80                      0xFFFF
+ *             dst_port  80                      0xFFFF
+ * END
+ * Other members in mask and spec should set to 0x00.
+ * Item->last should be NULL.
+ */
 static int
-hinic_parse_fdir_filter(struct rte_eth_dev *dev,
+hinic_parse_fdir_filter_tacm_tunnel(const struct rte_flow_attr *attr,
+			       const struct rte_flow_item pattern[],
+			       const struct rte_flow_action actions[],
+			       struct hinic_fdir_rule *rule,
+			       struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = NULL;
+
+	if (hinic_check_filter_arg(attr, pattern, actions, error))
+		return -rte_errno;
+
+	if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
+		return -rte_errno;
+
+	if (hinic_check_normal_attr_ele(attr, rule, error))
+		return -rte_errno;
+
+	if (hinic_check_normal_act_ele(item, actions, rule, error))
+		return -rte_errno;
+
+	return 0;
+}
+
+static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
@@ -1182,11 +1523,22 @@  static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
 {
 	int ret;
 
-	ret = hinic_parse_fdir_filter_normal(attr, pattern,
-						actions, rule, error);
+	ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
+						rule, error);
+	if (!ret)
+		goto step_next;
+
+	ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
+						rule, error);
+	if (!ret)
+		goto step_next;
+
+	ret = hinic_parse_fdir_filter_tacm_tunnel(attr, pattern, actions,
+						rule, error);
 	if (ret)
 		return ret;
 
+step_next:
 	if (rule->queue >= dev->data->nb_rx_queues)
 		return -ENOTSUP;
 
@@ -1229,18 +1581,17 @@  static int hinic_flow_validate(struct rte_eth_dev *dev,
 	return ret;
 }
 
-static inline int
-ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
-		 struct hinic_5tuple_filter_info *filter_info)
+static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
+		 struct hinic_5tuple_filter_info *hinic_filter_info)
 {
 	switch (filter->dst_ip_mask) {
 	case UINT32_MAX:
-		filter_info->dst_ip_mask = 0;
-		filter_info->dst_ip = filter->dst_ip;
+		hinic_filter_info->dst_ip_mask = 0;
+		hinic_filter_info->dst_ip = filter->dst_ip;
 		break;
 	case 0:
-		filter_info->dst_ip_mask = 1;
-		filter_info->dst_ip = 0;
+		hinic_filter_info->dst_ip_mask = 1;
+		hinic_filter_info->dst_ip = 0;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
@@ -1249,12 +1600,12 @@  static int hinic_flow_validate(struct rte_eth_dev *dev,
 
 	switch (filter->src_ip_mask) {
 	case UINT32_MAX:
-		filter_info->src_ip_mask = 0;
-		filter_info->src_ip = filter->src_ip;
+		hinic_filter_info->src_ip_mask = 0;
+		hinic_filter_info->src_ip = filter->src_ip;
 		break;
 	case 0:
-		filter_info->src_ip_mask = 1;
-		filter_info->src_ip = 0;
+		hinic_filter_info->src_ip_mask = 1;
+		hinic_filter_info->src_ip = 0;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
@@ -1263,18 +1614,17 @@  static int hinic_flow_validate(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static inline int
-ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
-		   struct hinic_5tuple_filter_info *filter_info)
+static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
+		   struct hinic_5tuple_filter_info *hinic_filter_info)
 {
 	switch (filter->dst_port_mask) {
 	case UINT16_MAX:
-		filter_info->dst_port_mask = 0;
-		filter_info->dst_port = filter->dst_port;
+		hinic_filter_info->dst_port_mask = 0;
+		hinic_filter_info->dst_port = filter->dst_port;
 		break;
 	case 0:
-		filter_info->dst_port_mask = 1;
-		filter_info->dst_port = 0;
+		hinic_filter_info->dst_port_mask = 1;
+		hinic_filter_info->dst_port = 0;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
@@ -1283,12 +1633,12 @@  static int hinic_flow_validate(struct rte_eth_dev *dev,
 
 	switch (filter->src_port_mask) {
 	case UINT16_MAX:
-		filter_info->src_port_mask = 0;
-		filter_info->src_port = filter->src_port;
+		hinic_filter_info->src_port_mask = 0;
+		hinic_filter_info->src_port = filter->src_port;
 		break;
 	case 0:
-		filter_info->src_port_mask = 1;
-		filter_info->src_port = 0;
+		hinic_filter_info->src_port_mask = 1;
+		hinic_filter_info->src_port = 0;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Invalid src_port mask.");
@@ -1298,18 +1648,17 @@  static int hinic_flow_validate(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static inline int
-ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
-		    struct hinic_5tuple_filter_info *filter_info)
+static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
+		    struct hinic_5tuple_filter_info *hinic_filter_info)
 {
 	switch (filter->proto_mask) {
 	case UINT8_MAX:
-		filter_info->proto_mask = 0;
-		filter_info->proto = filter->proto;
+		hinic_filter_info->proto_mask = 0;
+		hinic_filter_info->proto = filter->proto;
 		break;
 	case 0:
-		filter_info->proto_mask = 1;
-		filter_info->proto = 0;
+		hinic_filter_info->proto_mask = 1;
+		hinic_filter_info->proto = 0;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Invalid protocol mask.");
@@ -1319,8 +1668,7 @@  static int hinic_flow_validate(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static inline int
-ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
 			struct hinic_5tuple_filter_info *filter_info)
 {
 	if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
@@ -1468,30 +1816,20 @@  static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
  */
 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
 {
-	struct hinic_filter_info *filter_info =
-		HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
+	(void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
 
-	if (filter_info->type_mask &
-	    (1 << HINIC_PKT_TYPE_FIND_ID(PKT_BGPD_DPORT_TYPE)))
-		hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
+	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
 
-	if (filter_info->type_mask &
-	    (1 << HINIC_PKT_TYPE_FIND_ID(PKT_BGPD_SPORT_TYPE)))
-		hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
+	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
 
-	if (filter_info->type_mask &
-	    (1 << HINIC_PKT_TYPE_FIND_ID(PKT_VRRP_TYPE)))
-		hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
+	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
 
-	if (filter_info->type_mask &
-	    (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE)))
-		hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
+	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
 
-	hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
+	(void)hinic_flush_tcam_rule(nic_dev->hwdev);
 }
 
-static int
-hinic_filter_info_init(struct hinic_5tuple_filter *filter,
+static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
 		       struct hinic_filter_info *filter_info)
 {
 	switch (filter->filter_info.proto) {
@@ -1544,10 +1882,8 @@  void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
 	return 0;
 }
 
-static int
-hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
-			struct hinic_filter_info *filter_info,
-			int *index)
+static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
+			struct hinic_filter_info *filter_info, int *index)
 {
 	int type_id;
 
@@ -1586,9 +1922,8 @@  void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
  *    - On success, zero.
  *    - On failure, a negative value.
  */
-static int
-hinic_add_5tuple_filter(struct rte_eth_dev *dev,
-			struct hinic_5tuple_filter *filter)
+static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
+				struct hinic_5tuple_filter *filter)
 {
 	struct hinic_filter_info *filter_info =
 		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -1676,8 +2011,7 @@  void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
  * @param filter
  *  The pointer of the filter will be removed.
  */
-static void
-hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
+static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
 			   struct hinic_5tuple_filter *filter)
 {
 	struct hinic_filter_info *filter_info =
@@ -1929,7 +2263,6 @@  static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
 		default:
 			break;
 		}
-
 	} else {
 		ethertype_filter.pkt_proto = filter->ether_type;
 		i = hinic_ethertype_filter_lookup(filter_info,
@@ -1972,9 +2305,8 @@  static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static int
-hinic_fdir_info_init(struct hinic_fdir_rule *rule,
-		     struct hinic_fdir_info *fdir_info)
+static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
+				struct hinic_fdir_info *fdir_info)
 {
 	switch (rule->mask.src_ipv4_mask) {
 	case UINT32_MAX:
@@ -2014,10 +2346,8 @@  static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static inline int
-hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
-			  struct hinic_fdir_rule *rule,
-			  bool add)
+static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
+					struct hinic_fdir_rule *rule, bool add)
 {
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 	struct hinic_fdir_info fdir_info;
@@ -2062,6 +2392,352 @@  static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
+{
+	u8 idx;
+
+	for (idx = 0; idx < len; idx++)
+		key_y[idx] = src_input[idx] & mask[idx];
+}
+
+static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
+{
+	u8 idx;
+
+	for (idx = 0; idx < len; idx++)
+		key_x[idx] = key_y[idx] ^ mask[idx];
+}
+
+static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
+				struct tag_tcam_cfg_rule *fdir_tcam_rule)
+{
+	tcam_translate_key_y(fdir_tcam_rule->key.y,
+		(u8 *)(&tcam_key->key_info),
+		(u8 *)(&tcam_key->key_mask),
+		TCAM_FLOW_KEY_SIZE);
+	tcam_translate_key_x(fdir_tcam_rule->key.x,
+		fdir_tcam_rule->key.y,
+		(u8 *)(&tcam_key->key_mask),
+		TCAM_FLOW_KEY_SIZE);
+}
+
+static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
+				struct hinic_fdir_rule *rule,
+				struct tag_tcam_key *tcam_key,
+				struct tag_tcam_cfg_rule *fdir_tcam_rule)
+{
+	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+	switch (rule->mask.dst_ipv4_mask) {
+	case UINT32_MAX:
+		tcam_key->key_info.ext_dip_h =
+			(rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
+		tcam_key->key_info.ext_dip_l =
+			rule->hinic_fdir.dst_ip & 0xffffU;
+		tcam_key->key_mask.ext_dip_h =
+			(rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
+		tcam_key->key_mask.ext_dip_l =
+			rule->mask.dst_ipv4_mask & 0xffffU;
+		break;
+
+	case 0:
+		break;
+
+	default:
+		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+		return -EINVAL;
+	}
+
+	if (rule->mask.dst_port_mask > 0) {
+		tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
+		tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
+	}
+
+	if (rule->mask.src_port_mask > 0) {
+		tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
+		tcam_key->key_mask.src_port = rule->mask.src_port_mask;
+	}
+
+	switch (rule->mask.tunnel_flag) {
+	case UINT16_MAX:
+		tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
+		tcam_key->key_mask.tunnel_flag = UINT8_MAX;
+		break;
+
+	case 0:
+		tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
+		tcam_key->key_mask.tunnel_flag = 0;
+		break;
+
+	default:
+		PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
+		return -EINVAL;
+	}
+
+	if (rule->mask.tunnel_inner_dst_port_mask > 0) {
+		tcam_key->key_info.dst_port =
+					rule->hinic_fdir.tunnel_inner_dst_port;
+		tcam_key->key_mask.dst_port =
+					rule->mask.tunnel_inner_dst_port_mask;
+	}
+
+	if (rule->mask.tunnel_inner_src_port_mask > 0) {
+		tcam_key->key_info.src_port =
+					rule->hinic_fdir.tunnel_inner_src_port;
+		tcam_key->key_mask.src_port =
+					rule->mask.tunnel_inner_src_port_mask;
+	}
+
+	switch (rule->mask.proto_mask) {
+	case UINT16_MAX:
+		tcam_key->key_info.protocal = rule->hinic_fdir.proto;
+		tcam_key->key_mask.protocal = UINT8_MAX;
+		break;
+
+	case 0:
+		break;
+
+	default:
+		PMD_DRV_LOG(ERR, "invalid protocol mask.");
+		return -EINVAL;
+	}
+
+	tcam_key->key_mask.function_id = UINT16_MAX;
+
+	tcam_key->key_info.function_id = hinic_global_func_id(nic_dev->hwdev);
+
+	fdir_tcam_rule->data.qid = rule->queue;
+
+	tcam_key_calculate(tcam_key, fdir_tcam_rule);
+
+	return 0;
+}
+
+static inline struct hinic_tcam_filter *
+hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
+			struct tag_tcam_key *key)
+{
+	struct hinic_tcam_filter *it;
+
+	TAILQ_FOREACH(it, filter_list, entries) {
+		if (memcmp(key, &it->tcam_key,
+			sizeof(struct tag_tcam_key)) == 0) {
+			return it;
+		}
+	}
+
+	return NULL;
+}
+
+static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
+					struct hinic_tcam_info *tcam_info,
+					struct hinic_tcam_filter *tcam_filter,
+					u16 *tcam_index)
+{
+	int index;
+	int max_index;
+	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
+		max_index = HINIC_VF_MAX_TCAM_FILTERS;
+	else
+		max_index = HINIC_PF_MAX_TCAM_FILTERS;
+
+	for (index = 0; index < max_index; index++) {
+		if (tcam_info->tcam_index_array[index] == 0)
+			break;
+	}
+
+	if (index == max_index) {
+		PMD_DRV_LOG(ERR, "function 0x%x tcam filters only support %d filter rules",
+			hinic_global_func_id(nic_dev->hwdev), max_index);
+		return -EINVAL;
+	}
+
+	tcam_filter->index = index;
+	*tcam_index = index;
+
+	return 0;
+}
+
+static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
+				struct hinic_tcam_filter *tcam_filter,
+				struct tag_tcam_cfg_rule *fdir_tcam_rule)
+{
+	struct hinic_tcam_info *tcam_info =
+		HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
+	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+	u16 index = 0;
+	u16 tcam_block_index = 0;
+	int rc;
+
+	if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
+		return -EINVAL;
+
+	if (tcam_info->tcam_rule_nums == 0) {
+		if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+			rc = hinic_alloc_tcam_block(nic_dev->hwdev,
+				HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
+			if (rc != 0) {
+				PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
+				return -EFAULT;
+			}
+		} else {
+			rc = hinic_alloc_tcam_block(nic_dev->hwdev,
+				HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
+			if (rc != 0) {
+				PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
+				return -EFAULT;
+			}
+		}
+
+		tcam_info->tcam_block_index = tcam_block_index;
+	} else {
+		tcam_block_index = tcam_info->tcam_block_index;
+	}
+
+	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+		fdir_tcam_rule->index =
+			HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
+	} else {
+		fdir_tcam_rule->index =
+			tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
+	}
+
+	rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
+	if (rc != 0) {
+		PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
+		return -EFAULT;
+	}
+
+	PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x, "
+		"tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed",
+		hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
+		fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
+		tcam_info->tcam_rule_nums + 1);
+
+	if (tcam_info->tcam_rule_nums == 0) {
+		rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
+		if (rc < 0) {
+			(void)hinic_del_tcam_rule(nic_dev->hwdev,
+						fdir_tcam_rule->index);
+			return rc;
+		}
+	}
+
+	TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);
+
+	tcam_info->tcam_index_array[index] = 1;
+	tcam_info->tcam_rule_nums++;
+
+	return 0;
+}
+
+static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
+				struct hinic_tcam_filter *tcam_filter)
+{
+	struct hinic_tcam_info *tcam_info =
+		HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
+	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+	u32 index = 0;
+	u16 tcam_block_index = tcam_info->tcam_block_index;
+	int rc;
+	u8 block_type = 0;
+
+	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+		index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
+			tcam_filter->index;
+		block_type = HINIC_TCAM_BLOCK_TYPE_VF;
+	} else {
+		index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
+			tcam_filter->index;
+		block_type = HINIC_TCAM_BLOCK_TYPE_PF;
+	}
+
+	rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
+	if (rc != 0) {
+		PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
+		return -EFAULT;
+	}
+
+	PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
+		"tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed",
+		hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
+		tcam_info->tcam_rule_nums - 1);
+
+	TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);
+
+	tcam_info->tcam_index_array[tcam_filter->index] = 0;
+
+	rte_free(tcam_filter);
+
+	tcam_info->tcam_rule_nums--;
+
+	if (tcam_info->tcam_rule_nums == 0) {
+		(void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
+					&tcam_block_index);
+	}
+
+	return 0;
+}
+
+static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
+					struct hinic_fdir_rule *rule, bool add)
+{
+	struct hinic_tcam_info *tcam_info =
+		HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
+	struct hinic_tcam_filter *tcam_filter;
+	struct tag_tcam_cfg_rule fdir_tcam_rule;
+	struct tag_tcam_key tcam_key;
+	int ret;
+
+	memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
+	memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));
+
+	ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Init hinic fdir tcam info failed!");
+		return ret;
+	}
+
+	tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
+						&tcam_key);
+	if (tcam_filter != NULL && add) {
+		PMD_DRV_LOG(ERR, "Filter exists.");
+		return -EEXIST;
+	}
+	if (tcam_filter == NULL && !add) {
+		PMD_DRV_LOG(ERR, "Filter doesn't exist.");
+		return -ENOENT;
+	}
+
+	if (add) {
+		tcam_filter = rte_zmalloc("hiovs_5tuple_filter",
+				sizeof(struct hinic_tcam_filter), 0);
+		if (tcam_filter == NULL)
+			return -ENOMEM;
+		(void)rte_memcpy(&tcam_filter->tcam_key,
+				 &tcam_key, sizeof(struct tag_tcam_key));
+		tcam_filter->queue = fdir_tcam_rule.data.qid;
+
+		ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
+		if (ret < 0) {
+			rte_free(tcam_filter);
+			return ret;
+		}
+
+		rule->tcam_index = fdir_tcam_rule.index;
+
+	} else {
+		PMD_DRV_LOG(ERR, "Begin to hinic_del_tcam_filter");
+		ret = hinic_del_tcam_filter(dev, tcam_filter);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
 /**
  * Create or destroy a flow rule.
  * Theorically one rule can match more than one filters.
@@ -2158,7 +2834,16 @@  static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
 	ret = hinic_parse_fdir_filter(dev, attr, pattern,
 				      actions, &fdir_rule, error);
 	if (!ret) {
-		ret = hinic_add_del_fdir_filter(dev, &fdir_rule, TRUE);
+		if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
+			ret = hinic_add_del_fdir_filter(dev,
+					&fdir_rule, TRUE);
+		} else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
+			ret = hinic_add_del_tcam_fdir_filter(dev,
+					&fdir_rule, TRUE);
+		} else {
+			PMD_DRV_LOG(INFO, "flow fdir rule create failed, rule mode wrong");
+			goto out;
+		}
 		if (!ret) {
 			fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
 				sizeof(struct hinic_fdir_rule_ele), 0);
@@ -2187,9 +2872,8 @@  static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
 }
 
 /* Destroy a flow rule on hinic. */
-static int hinic_flow_destroy(struct rte_eth_dev *dev,
-			      struct rte_flow *flow,
-			      struct rte_flow_error *error)
+static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+				struct rte_flow_error *error)
 {
 	int ret;
 	struct rte_flow *pmd_flow = flow;
@@ -2235,7 +2919,15 @@  static int hinic_flow_destroy(struct rte_eth_dev *dev,
 		rte_memcpy(&fdir_rule,
 			&fdir_rule_ptr->filter_info,
 			sizeof(struct hinic_fdir_rule));
-		ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
+		if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
+			ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
+		} else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
+			ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
+								FALSE);
+		} else {
+			PMD_DRV_LOG(ERR, "FDIR Filter type is wrong!");
+			ret = -EINVAL;
+		}
 		if (!ret) {
 			TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
 				fdir_rule_ptr, entries);
@@ -2318,8 +3010,16 @@  static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
 static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
 {
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+	struct hinic_tcam_info *tcam_info =
+		HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
+	struct hinic_tcam_filter *tcam_filter_ptr;
+
+	while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
+		(void)hinic_del_tcam_filter(dev, tcam_filter_ptr);
 
 	(void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
+
+	(void)hinic_flush_tcam_rule(nic_dev->hwdev);
 }
 
 static void hinic_filterlist_flush(struct rte_eth_dev *dev)
@@ -2377,9 +3077,18 @@  static int hinic_flow_flush(struct rte_eth_dev *dev,
 	return 0;
 }
 
+void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
+{
+	hinic_clear_all_ntuple_filter(dev);
+	hinic_clear_all_ethertype_filter(dev);
+	hinic_clear_all_fdir_filter(dev);
+	hinic_filterlist_flush(dev);
+}
+
 const struct rte_flow_ops hinic_flow_ops = {
 	.validate = hinic_flow_validate,
 	.create = hinic_flow_create,
 	.destroy = hinic_flow_destroy,
 	.flush = hinic_flow_flush,
 };
+
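
One detail of the hinic_pmd_flow.c changes that is easy to miss in the diff: tcam_translate_key_y()/tcam_translate_key_x() encode each rule as an x/y key pair with y = spec & mask and x = y ^ mask (equivalently x = ~spec & mask), so an unmasked bit ends up with x == y == 0 while a masked bit has exactly one of x/y set to encode its value. The stand-alone sketch below just replays that transform on a single example byte; the values are arbitrary and it is not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t spec = 0xa5;	/* example key byte */
	uint8_t mask = 0x0f;	/* only the low nibble is matched */
	uint8_t y = spec & mask;	/* 0x05: masked spec bits */
	uint8_t x = y ^ mask;		/* 0x0a: ~spec & mask */

	printf("spec=0x%02x mask=0x%02x -> x=0x%02x y=0x%02x\n",
	       spec, mask, x, y);
	return 0;
}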