@@ -3649,6 +3649,93 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
return status;
}
+/**
+ * txgbe_fdir_check_cmd_complete - poll to check whether FDIRPICMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: output; receives the last value read from the FDIRPICMD register
+ *
+ * Returns 0 as soon as the OP field of FDIRPICMD reads back as zero, or
+ * TXGBE_ERR_FDIR_CMD_INCOMPLETE if the previous command is still pending
+ * after TXGBE_FDIRCMD_CMD_POLL reads spaced 10 us apart.
+ */
+static s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = rd32(hw, TXGBE_FDIRPICMD);
+ if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK))
+ return 0;
+ usec_delay(10);
+ }
+
+ return TXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ * txgbe_reinit_fdir_tables - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ *
+ * Drops all programmed Flow Director entries by restarting the hardware
+ * table-init sequence: clear FDIRFREE/FDIRPIHASH, toggle the errata bit in
+ * FDIRPICMD, then re-write FDIRCTL and wait for INITDONE.
+ *
+ * Returns 0 on success, TXGBE_ERR_FDIR_CMD_INCOMPLETE if a previous
+ * FDIRPICMD operation never finished, or TXGBE_ERR_FDIR_REINIT_FAILED
+ * if the hardware does not report init-done within the poll window.
+ **/
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw)
+{
+ s32 err;
+ int i;
+ u32 fdirctrl = rd32(hw, TXGBE_FDIRCTL);
+ u32 fdircmd;
+ /* re-use the current FDIRCTL config, minus the init-done status bit */
+ fdirctrl &= ~TXGBE_FDIRCTL_INITDONE;
+
+ DEBUGFUNC("txgbe_reinit_fdir_tables");
+
+ /*
+ * Before starting reinitialization process,
+ * FDIRPICMD.OP must be zero.
+ */
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
+ }
+
+ wr32(hw, TXGBE_FDIRFREE, 0);
+ txgbe_flush(hw);
+ /*
+ * adapters flow director init flow cannot be restarted,
+ * Workaround silicon errata by performing the following steps
+ * before re-writing the FDIRCTL control register with the same value.
+ * - write 1 to bit 8 of FDIRPICMD register &
+ * - write 0 to bit 8 of FDIRPICMD register
+ */
+ wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, TXGBE_FDIRPICMD_CLR);
+ txgbe_flush(hw);
+ wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, 0);
+ txgbe_flush(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ wr32(hw, TXGBE_FDIRPIHASH, 0x00);
+ txgbe_flush(hw);
+
+ wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+ txgbe_flush(hw);
+
+ /* Poll init-done after we write FDIRCTL register */
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32m(hw, TXGBE_FDIRCTL, TXGBE_FDIRCTL_INITDONE))
+ break;
+ msec_delay(1);
+ }
+ if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+ DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+ return TXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ rd32(hw, TXGBE_FDIRUSED);
+ rd32(hw, TXGBE_FDIRFAIL);
+ rd32(hw, TXGBE_FDIRMATCH);
+ rd32(hw, TXGBE_FDIRMISS);
+ rd32(hw, TXGBE_FDIRLEN);
+
+ return 0;
+}
+
/**
* txgbe_start_hw_raptor - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
@@ -108,5 +108,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw);
s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval);
s32 txgbe_prot_autoc_read_raptor(struct txgbe_hw *hw, bool *locked, u64 *value);
s32 txgbe_prot_autoc_write_raptor(struct txgbe_hw *hw, bool locked, u64 value);
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw);
bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw);
#endif /* _TXGBE_HW_H_ */
@@ -5186,6 +5186,27 @@ txgbe_clear_syn_filter(struct rte_eth_dev *dev)
}
}
+/*
+ * remove all the L2 tunnel filters
+ *
+ * Walks the software l2_tn_list and deletes each entry through
+ * txgbe_dev_l2_tunnel_filter_del(), which also unlinks it from the list.
+ * Returns 0 on success or the first negative error from the delete path
+ * (stopping early, with remaining filters left in place).
+ */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_filter *l2_tn_filter;
+ /* zero-init: only type/id/pool are set below, so any other members
+ * read by the delete path must not be stack garbage
+ */
+ struct txgbe_l2_tunnel_conf l2_tn_conf = {0};
+ int ret = 0;
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
@@ -488,12 +488,14 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
extern const struct rte_flow_ops txgbe_flow_ops;
void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
int txgbe_vt_check(struct txgbe_hw *hw);
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
@@ -902,6 +902,27 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
return err;
}
+/*
+ * txgbe_fdir_flush - wipe the hardware Flow Director tables
+ *
+ * Re-runs the hardware FDIR table init sequence and, on success, resets
+ * the driver's soft add/remove counters so statistics stay consistent
+ * with the now-empty hardware table. Returns 0 on success or a negative
+ * error code if the hardware re-init fails.
+ */
+static int
+txgbe_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ int ret;
+
+ ret = txgbe_reinit_fdir_tables(hw);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
+ return ret;
+ }
+
+ /* hardware table is empty again; zero the soft counters to match */
+ info->f_add = 0;
+ info->f_remove = 0;
+ info->add = 0;
+ info->remove = 0;
+
+ return ret;
+}
+
/* restore flow director filter */
void
txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
@@ -936,3 +957,29 @@ txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
}
}
+/*
+ * remove all the flow director filters
+ *
+ * Clears the software state (lookup hash, hash map, filter list) and, if
+ * any filter actually existed, flushes the hardware FDIR tables as well.
+ * Returns 0 on success or the negative error from the hardware flush.
+ */
+int
+txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_fdir_filter *fdir_filter;
+ struct txgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct txgbe_fdir_filter *) * TXGBE_MAX_FDIR_FILTER_NUM);
+ /* remember whether the list was non-empty before draining it; used
+ * below to skip the hardware flush when there was nothing to remove
+ */
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = txgbe_fdir_flush(dev);
+
+ return ret;
+}
@@ -2555,6 +2555,16 @@ txgbe_parse_rss_filter(struct rte_eth_dev *dev,
return 0;
}
+/*
+ * remove the rss filter
+ *
+ * Disables the stored RSS configuration via txgbe_config_rss_filter();
+ * queue_num == 0 means no RSS rule was ever programmed, so nothing to do.
+ */
+static void
+txgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->rss_info.conf.queue_num)
+ txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
void
txgbe_filterlist_init(void)
{
@@ -3069,9 +3079,42 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+/*
+ * Destroy all flow rules associated with a port on txgbe.
+ *
+ * rte_flow_ops::flush callback. Clears every hardware filter category
+ * (ntuple, ethertype, SYN, FDIR, L2 tunnel, RSS); only when all hardware
+ * operations succeed is the software rule bookkeeping flushed, so a
+ * failure leaves the rule lists describing what is still programmed.
+ * Returns 0 on success or a negative errno with @error populated.
+ */
+static int
+txgbe_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ txgbe_clear_all_ntuple_filter(dev);
+ txgbe_clear_all_ethertype_filter(dev);
+ txgbe_clear_syn_filter(dev);
+
+ ret = txgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = txgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ txgbe_clear_rss_filter(dev);
+
+ /* hardware is clean; now drop the software rule lists */
+ txgbe_filterlist_flush();
+
+ return 0;
+}
+
const struct rte_flow_ops txgbe_flow_ops = {
.validate = txgbe_flow_validate,
.create = txgbe_flow_create,
.destroy = txgbe_flow_destroy,
+ .flush = txgbe_flow_flush,
};