[PATCH v5 06/12] net/nfp: add flower PF related routines
Andrew Rybchenko
andrew.rybchenko at oktetlabs.ru
Fri Aug 5 14:55:26 CEST 2022
On 8/5/22 09:32, Chaoyong He wrote:
> This commit adds the start/stop/close routine of the
"This commit adds" -> "Add"
Typically, close is paired with configure.
> flower PF vNIC.
>
> Signed-off-by: Chaoyong He <chaoyong.he at corigine.com>
> Reviewed-by: Niklas Söderlund <niklas.soderlund at corigine.com>
> ---
> drivers/net/nfp/flower/nfp_flower.c | 193 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 193 insertions(+)
>
> diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
> index c05d4ca..2498020 100644
> --- a/drivers/net/nfp/flower/nfp_flower.c
> +++ b/drivers/net/nfp/flower/nfp_flower.c
> @@ -7,6 +7,7 @@
> #include <ethdev_driver.h>
> #include <rte_service_component.h>
> #include <rte_malloc.h>
> +#include <rte_alarm.h>
> #include <ethdev_pci.h>
> #include <ethdev_driver.h>
>
> @@ -37,11 +38,178 @@
> return 0;
> }
>
> +static int
> +nfp_flower_pf_start(struct rte_eth_dev *dev)
> +{
> + int ret;
> + uint32_t new_ctrl;
> + uint32_t update = 0;
> + struct nfp_net_hw *hw;
> +
> + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + /* Disabling queues just in case... */
> + nfp_net_disable_queues(dev);
> +
> + /* Enabling the required queues in the device */
> + nfp_net_enable_queues(dev);
> +
> + new_ctrl = nfp_check_offloads(dev);
> +
> + /* Writing configuration parameters in the device */
> + nfp_net_params_setup(hw);
> +
> + nfp_net_rss_config_default(dev);
> + update |= NFP_NET_CFG_UPDATE_RSS;
> +
> + if (hw->cap & NFP_NET_CFG_CTRL_RSS2)
> + new_ctrl |= NFP_NET_CFG_CTRL_RSS2;
> + else
> + new_ctrl |= NFP_NET_CFG_CTRL_RSS;
> +
> + /* Enable device */
> + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
> +
> + update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
> +
> + if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
> + new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
> +
> + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
> +
> + /* If an error when reconfig we avoid to change hw state */
> + ret = nfp_net_reconfig(hw, new_ctrl, update);
> + if (ret) {
Compare the return value with 0 explicitly, i.e. `if (ret != 0)`, per the DPDK coding style.
> + PMD_INIT_LOG(ERR, "Failed to reconfig PF vnic");
> + return -EIO;
> + }
> +
> + hw->ctrl = new_ctrl;
> +
> + /* Setup the freelist ring */
> + ret = nfp_net_rx_freelist_setup(dev);
> + if (ret) {
Same here: compare with 0 explicitly, i.e. `if (ret != 0)`.
> + PMD_INIT_LOG(ERR, "Error with flower PF vNIC freelist setup");
> + return -EIO;
> + }
> +
> + return 0;
> +}
> +
> +/* Stop device: disable rx and tx functions to allow for reconfiguring. */
> +static int
> +nfp_flower_pf_stop(struct rte_eth_dev *dev)
> +{
> + uint16_t i;
> + struct nfp_net_hw *hw;
> + struct nfp_net_txq *this_tx_q;
> + struct nfp_net_rxq *this_rx_q;
> +
> + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + nfp_net_disable_queues(dev);
> +
> + /* Clear queues */
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
> + nfp_net_reset_tx_queue(this_tx_q);
> + }
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
> + nfp_net_reset_rx_queue(this_rx_q);
> + }
> +
> + if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> + /* Configure the physical port down */
> + nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
> + else
> + nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
> +
> + return 0;
> +}
> +
> +/* Reset and stop device. The device can not be restarted. */
> +static int
> +nfp_flower_pf_close(struct rte_eth_dev *dev)
> +{
> + uint16_t i;
> + struct nfp_net_hw *hw;
> + struct nfp_pf_dev *pf_dev;
> + struct nfp_net_txq *this_tx_q;
> + struct nfp_net_rxq *this_rx_q;
> + struct rte_pci_device *pci_dev;
> + struct nfp_app_flower *app_flower;
> +
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + return 0;
> +
> + pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> + app_flower = NFP_APP_PRIV_TO_APP_FLOWER(pf_dev->app_priv);
> +
> + /*
> + * We assume that the DPDK application is stopping all the
> + * threads/queues before calling the device close function.
> + */
> +
> + nfp_net_disable_queues(dev);
> +
> + /* Clear queues */
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
> + nfp_net_reset_tx_queue(this_tx_q);
> + }
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
> + nfp_net_reset_rx_queue(this_rx_q);
> + }
> +
> + /* Cancel possible impending LSC work here before releasing the port*/
> + rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
> +
> + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
> +
> + rte_eth_dev_release_port(dev);
> +
> + /* Now it is safe to free all PF resources */
> + PMD_INIT_LOG(INFO, "Freeing PF resources");
> + nfp_cpp_area_free(pf_dev->ctrl_area);
> + nfp_cpp_area_free(pf_dev->hwqueues_area);
> + free(pf_dev->hwinfo);
> + free(pf_dev->sym_tbl);
> + nfp_cpp_free(pf_dev->cpp);
> + rte_free(app_flower);
> + rte_free(pf_dev);
> +
> + rte_intr_disable(pci_dev->intr_handle);
> +
> + /* unregister callback func from eal lib */
> + rte_intr_callback_unregister(pci_dev->intr_handle,
> + nfp_net_dev_interrupt_handler, (void *)dev);
> +
> + return 0;
> +}
> +
> +static int
> +nfp_flower_pf_link_update(__rte_unused struct rte_eth_dev *dev,
> + __rte_unused int wait_to_complete)
> +{
This is a really confusing implementation of the operation.
Could you explain why a dummy implementation is OK here? Why is it required at all?
> + return 0;
> +}
> +
> static const struct eth_dev_ops nfp_flower_pf_dev_ops = {
> .dev_configure = nfp_flower_pf_configure,
>
> /* Use the normal dev_infos_get functionality in the NFP PMD */
> .dev_infos_get = nfp_net_infos_get,
> +
> + .dev_start = nfp_flower_pf_start,
> + .dev_stop = nfp_flower_pf_stop,
> + .dev_close = nfp_flower_pf_close,
> + .link_update = nfp_flower_pf_link_update,
> };
>
> static struct rte_service_spec flower_services[NFP_FLOWER_SERVICE_MAX] = {
> @@ -375,6 +543,24 @@
> return ret;
> }
>
> +static int
> +nfp_flower_start_pf_vnic(struct nfp_net_hw *hw)
> +{
> + int ret;
> + uint16_t port_id;
> +
> + port_id = hw->eth_dev->data->port_id;
> +
> + /* Start the device */
> + ret = rte_eth_dev_start(port_id);
> + if (ret) {
Compare with 0 explicitly, i.e. `if (ret != 0)`.
> + PMD_INIT_LOG(ERR, "Could not start PF device %d", port_id);
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> int
> nfp_init_app_flower(struct nfp_pf_dev *pf_dev)
> {
> @@ -432,6 +618,13 @@
> goto pf_cpp_area_cleanup;
> }
>
> + /* Start the PF vNIC */
> + ret = nfp_flower_start_pf_vnic(app_flower->pf_hw);
> + if (ret) {
Compare with 0 explicitly, i.e. `if (ret != 0)`.
> + PMD_INIT_LOG(ERR, "Could not start flower PF vNIC");
> + goto pf_vnic_cleanup;
> + }
> +
> /* Start up flower services */
> if (nfp_flower_enable_services(app_flower)) {
> ret = -ESRCH;
More information about the dev
mailing list