[dpdk-stable] [PATCH v2 61/67] net/mlx5: change device reference for secondary process

Yongseok Koh yskoh at mellanox.com
Tue Jun 5 02:41:33 CEST 2018


[ backported from upstream commit df428ceef4fdbceacda8d50341c25ddd46a76a39 ]

rte_eth_devices[] is not shared between the primary and secondary
processes; each process has its own static copy of the array. The
back-pointer to the device (priv->dev) is therefore not valid in a
secondary process. Instead, priv holds a pointer to the device data,
which resides in shared memory:
  struct rte_eth_dev_data *dev_data;

Two macros are added to access the port id and the per-process
rte_eth_dev pointer through this shared data:
  #define PORT_ID(priv) ((priv)->dev_data->port_id)
  #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
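
For illustration only (not part of this patch), a hypothetical helper
shows how either process can recover its own rte_eth_dev from priv via
the shared port id, assuming the definitions above from mlx5.h:

  /* Sketch: dev_data lives in shared memory, rte_eth_devices[] is a
   * per-process array, so index the local array with the shared
   * port id instead of dereferencing a primary-process pointer. */
  static inline struct rte_eth_dev *
  priv_to_eth_dev(struct priv *priv)
  {
  	return ETH_DEV(priv);
  }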

Signed-off-by: Yongseok Koh <yskoh at mellanox.com>
---
 drivers/net/mlx5/mlx5.c      |  2 +-
 drivers/net/mlx5/mlx5.h      |  5 ++++-
 drivers/net/mlx5/mlx5_flow.c | 21 +++++++++------------
 drivers/net/mlx5/mlx5_mr.c   | 19 +++++++++----------
 drivers/net/mlx5/mlx5_rxq.c  | 38 +++++++++++++++++++-------------------
 drivers/net/mlx5/mlx5_txq.c  | 13 ++++++-------
 6 files changed, 48 insertions(+), 50 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index bb56bd22d..10ce33592 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1008,7 +1008,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			goto port_error;
 		}
 		eth_dev->data->dev_private = priv;
-		priv->dev = eth_dev;
+		priv->dev_data = eth_dev->data;
 		eth_dev->data->mac_addrs = priv->mac;
 		eth_dev->device = &pci_dev->device;
 		rte_eth_copy_pci_info(eth_dev, pci_dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 75aa853f2..5e6027b82 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -109,7 +109,7 @@ struct mlx5_verbs_alloc_ctx {
 };
 
 struct priv {
-	struct rte_eth_dev *dev; /* Ethernet device of master process. */
+	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
 	struct ibv_context *ctx; /* Verbs context. */
 	struct ibv_device_attr_ex device_attr; /* Device properties. */
 	struct ibv_pd *pd; /* Protection Domain. */
@@ -170,6 +170,9 @@ struct priv {
 	/* Context for Verbs allocator. */
 };
 
+#define PORT_ID(priv) ((priv)->dev_data->port_id)
+#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
+
 /* mlx5.c */
 
 int mlx5_getenv_int(const char *);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f9a931438..54e0dbc57 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1725,7 +1725,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
 		parser->queue[HASH_RXQ_ETH].ibv_attr;
 	if (parser->count)
 		flow->cs = parser->cs;
-	if (!priv->dev->data->dev_started)
+	if (!dev->data->dev_started)
 		return 0;
 	parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
 	flow->frxq[HASH_RXQ_ETH].ibv_flow =
@@ -1776,7 +1776,6 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
 				  struct rte_flow *flow,
 				  struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
 	unsigned int i;
 
 	for (i = 0; i != hash_rxq_init_n; ++i) {
@@ -1787,7 +1786,7 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
 		flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
 		parser->queue[i].ibv_attr = NULL;
 		hash_fields = hash_rxq_init[i].hash_fields;
-		if (!priv->dev->data->dev_started)
+		if (!dev->data->dev_started)
 			continue;
 		flow->frxq[i].hrxq =
 			mlx5_hrxq_get(dev,
@@ -1836,7 +1835,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
 			      struct rte_flow *flow,
 			      struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct priv *priv __rte_unused = dev->data->dev_private;
 	int ret;
 	unsigned int i;
 	unsigned int flows_n = 0;
@@ -1849,7 +1848,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
 		goto error;
 	if (parser->count)
 		flow->cs = parser->cs;
-	if (!priv->dev->data->dev_started)
+	if (!dev->data->dev_started)
 		return 0;
 	for (i = 0; i != hash_rxq_init_n; ++i) {
 		if (!flow->frxq[i].hrxq)
@@ -2642,9 +2641,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
 	}
 	priv->isolated = !!enable;
 	if (enable)
-		priv->dev->dev_ops = &mlx5_dev_ops_isolate;
+		dev->dev_ops = &mlx5_dev_ops_isolate;
 	else
-		priv->dev->dev_ops = &mlx5_dev_ops;
+		dev->dev_ops = &mlx5_dev_ops;
 	return 0;
 }
 
@@ -3032,11 +3031,10 @@ mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
 static void
 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
 {
-	struct priv *priv = dev->data->dev_private;
 	struct rte_eth_fdir_masks *mask =
-		&priv->dev->data->dev_conf.fdir_conf.mask;
+		&dev->data->dev_conf.fdir_conf.mask;
 
-	fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
+	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
 	fdir_info->guarant_spc = 0;
 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
 	fdir_info->max_flexpayload = 0;
@@ -3064,9 +3062,8 @@ static int
 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
 		    void *arg)
 {
-	struct priv *priv = dev->data->dev_private;
 	enum rte_fdir_mode fdir_mode =
-		priv->dev->data->dev_conf.fdir_conf.mode;
+		dev->data->dev_conf.fdir_conf.mode;
 
 	if (filter_op == RTE_ETH_FILTER_NOP)
 		return 0;
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 933bfe395..a50c52088 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -134,8 +134,8 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 	rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
 	/* Add a new entry, register MR first. */
 	DRV_LOG(DEBUG, "port %u discovered new memory pool \"%s\" (%p)",
-		txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp);
-	dev = txq_ctrl->priv->dev;
+		PORT_ID(txq_ctrl->priv), mp->name, (void *)mp);
+	dev = ETH_DEV(txq_ctrl->priv);
 	mr = mlx5_mr_get(dev, mp);
 	if (mr == NULL) {
 		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -143,8 +143,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 				"port %u using unregistered mempool 0x%p(%s)"
 				" in secondary process, please create mempool"
 				" before rte_eth_dev_start()",
-				txq_ctrl->priv->dev->data->port_id,
-				(void *)mp, mp->name);
+				PORT_ID(txq_ctrl->priv), (void *)mp, mp->name);
 			rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 			rte_errno = ENOTSUP;
 			return NULL;
@@ -155,7 +154,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 		DRV_LOG(DEBUG,
 			"port %u unable to configure memory region,"
 			" ibv_reg_mr() failed.",
-			txq_ctrl->priv->dev->data->port_id);
+			PORT_ID(txq_ctrl->priv));
 		rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 		return NULL;
 	}
@@ -164,7 +163,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 		DRV_LOG(DEBUG,
 			"port %u memory region <-> memory pool table full, "
 			" dropping oldest entry",
-			txq_ctrl->priv->dev->data->port_id);
+			PORT_ID(txq_ctrl->priv));
 		--idx;
 		mlx5_mr_release(txq->mp2mr[0]);
 		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
@@ -175,7 +174,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 	DRV_LOG(DEBUG,
 		"port %u new memory region lkey for MP \"%s\" (%p): 0x%08"
 		PRIu32,
-		txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp,
+		PORT_ID(txq_ctrl->priv), mp->name, (void *)mp,
 		txq_ctrl->txq.mp2mr[idx]->lkey);
 	rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 	return mr;
@@ -236,15 +235,15 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
 	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
 			data.ret == -1)
 		return;
-	mr = mlx5_mr_get(priv->dev, mp);
+	mr = mlx5_mr_get(ETH_DEV(priv), mp);
 	if (mr) {
 		mlx5_mr_release(mr);
 		return;
 	}
-	mr = mlx5_mr_new(priv->dev, mp);
+	mr = mlx5_mr_new(ETH_DEV(priv), mp);
 	if (!mr)
 		DRV_LOG(ERR, "port %u cannot create memory region: %s",
-			priv->dev->data->port_id, strerror(rte_errno));
+			PORT_ID(priv), strerror(rte_errno));
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 17b70dc9a..dcc5a87b6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -105,7 +105,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
 		if (buf == NULL) {
 			DRV_LOG(ERR, "port %u empty mbuf pool",
-				rxq_ctrl->priv->dev->data->port_id);
+				PORT_ID(rxq_ctrl->priv));
 			rte_errno = ENOMEM;
 			goto error;
 		}
@@ -149,7 +149,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 	DRV_LOG(DEBUG,
 		"port %u Rx queue %u allocated and configured %u segments"
 		" (max %u packets)",
-		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n,
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
 	return 0;
 error:
@@ -161,7 +161,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -182,7 +182,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 	uint16_t i;
 
 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
-		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
 	if (rxq->elts == NULL)
 		return;
 	/**
@@ -213,7 +213,7 @@ void
 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
-		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
 	if (rxq_ctrl->ibv)
 		mlx5_rxq_ibv_release(rxq_ctrl->ibv);
 	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -300,11 +300,11 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 		return;
 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	priv = rxq_ctrl->priv;
-	if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
+	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
 		rte_panic("port %u Rx queue %u is still used by a flow and"
-			  " cannot be removed\n", priv->dev->data->port_id,
-			  rxq_ctrl->idx);
-	mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
+			  " cannot be removed\n",
+			  PORT_ID(priv), rxq_ctrl->idx);
+	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
 }
 
 /**
@@ -324,9 +324,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 	unsigned int rxqs_n = priv->rxqs_n;
 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
 	unsigned int count = 0;
-	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
 
-	if (!priv->dev->data->dev_conf.intr_conf.rxq)
+	if (!dev->data->dev_conf.intr_conf.rxq)
 		return 0;
 	mlx5_rx_intr_vec_disable(dev);
 	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
@@ -398,12 +398,12 @@ void
 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
 	unsigned int i;
 	unsigned int rxqs_n = priv->rxqs_n;
 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
 
-	if (!priv->dev->data->dev_conf.intr_conf.rxq)
+	if (!dev->data->dev_conf.intr_conf.rxq)
 		return;
 	if (!intr_handle->intr_vec)
 		goto free;
@@ -842,7 +842,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
 	if (!ret)
 		rxq_ibv->mr = NULL;
 	DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-		rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
+		PORT_ID(rxq_ibv->rxq_ctrl->priv),
 		rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
 		rxq_free_elts(rxq_ibv->rxq_ctrl);
@@ -928,7 +928,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	tmpl->socket = socket;
-	if (priv->dev->data->dev_conf.intr_conf.rxq)
+	if (dev->data->dev_conf.intr_conf.rxq)
 		tmpl->irq = 1;
 	/* Enable scattered packets support for this queue if necessary. */
 	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
@@ -1272,8 +1272,8 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
 	unsigned int i;
 
 	DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-		((struct priv *)dev->data->dev_private)->port,
-		(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+		dev->data->port_id, (void *)ind_tbl,
+		rte_atomic32_read(&ind_tbl->refcnt));
 	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
 		claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
 	for (i = 0; i != ind_tbl->queues_n; ++i)
@@ -1457,8 +1457,8 @@ int
 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
 	DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-		((struct priv *)dev->data->dev_private)->port,
-		(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+		dev->data->port_id, (void *)hrxq,
+		rte_atomic32_read(&hrxq->refcnt));
 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
 		claim_zero(ibv_destroy_qp(hrxq->qp));
 		mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 7abfd565b..a5c6b5851 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -75,7 +75,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
-		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -97,7 +97,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
 
 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
-		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -205,9 +205,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
 	priv = txq_ctrl->priv;
 	for (i = 0; (i != priv->txqs_n); ++i)
 		if ((*priv->txqs)[i] == txq) {
-			mlx5_txq_release(priv->dev, i);
+			mlx5_txq_release(ETH_DEV(priv), i);
 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
-				priv->dev->data->port_id, txq_ctrl->idx);
+				PORT_ID(priv), txq_ctrl->idx);
 			break;
 		}
 }
@@ -545,7 +545,7 @@ mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
 {
 	assert(txq_ibv);
 	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		txq_ibv->txq_ctrl->priv->dev->data->port_id,
+		PORT_ID(txq_ibv->txq_ctrl->priv),
 		txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
 		claim_zero(ibv_destroy_qp(txq_ibv->qp));
@@ -684,8 +684,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			DRV_LOG(WARNING,
 				"port %u txq inline is too large (%d) setting it"
 				" to the maximum possible: %d\n",
-				priv->dev->data->port_id, priv->txq_inline,
-				max_inline);
+				PORT_ID(priv), priv->txq_inline, max_inline);
 			tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
 		}
 	}
-- 
2.11.0


