[v3,1/2] net/pcap: move pcap handler to process private

Message ID 20181114195647.196648-2-qi.z.zhang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers
Series fix pcap handlers for secondary |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/mellanox-Performance-Testing success Performance Testing PASS
ci/intel-Performance-Testing success Performance Testing PASS
ci/Intel-compilation success Compilation OK

Commit Message

Qi Zhang Nov. 14, 2018, 7:56 p.m. UTC
  This is prework for data path enabling for secondary process.
To prevent the pcap handler opened by one process from being overwritten
by another process, each process should have its own private copy;
`rte_eth_dev->process_private` is exactly what we need.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/pcap/rte_eth_pcap.c | 40 ++++++++++++++++++++++++++++++++++------
 1 file changed, 34 insertions(+), 6 deletions(-)
  

Comments

Ferruh Yigit Nov. 14, 2018, 11:05 p.m. UTC | #1
On 11/14/2018 7:56 PM, Qi Zhang wrote:
> This is prework for data path enabling for secondary process.
> To prevent the pcap handler opened by one process from being overwritten
> by another process, each process should have its own private copy;
> `rte_eth_dev->process_private` is exactly what we need.
> 
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

<...>

> @@ -646,8 +652,10 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
>  		struct rte_mempool *mb_pool)
>  {
>  	struct pmd_internals *internals = dev->data->dev_private;
> +	struct pmd_process_private *pp = dev->process_private;
>  	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
>  
> +	pcap_q->pcap = pp->rx_pcap[rx_queue_id];
>  	pcap_q->mb_pool = mb_pool;
>  	dev->data->rx_queues[rx_queue_id] = pcap_q;
>  	pcap_q->in_port = dev->data->port_id;
> @@ -663,8 +671,12 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
>  		const struct rte_eth_txconf *tx_conf __rte_unused)
>  {
>  	struct pmd_internals *internals = dev->data->dev_private;
> +	struct pmd_process_private *pp = dev->process_private;
> +	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
>  
> -	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
> +	pcap_q->pcap = pp->tx_pcap[tx_queue_id];
> +	pcap_q->dumper = pp->tx_dumper[tx_queue_id];
> +	dev->data->tx_queues[tx_queue_id] = pcap_q;

We can't do this; it would be the same as not using process_private at all,
because dev->data is shared between the primary and secondary processes.
The handlers need to be accessed directly from process_private in the burst
functions. To be able to match a queue with its handlers in process_private,
the queue may need to include a queue_index.

tap pmd has similar implementation already.
  
Qi Zhang Nov. 15, 2018, 12:13 a.m. UTC | #2
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, November 14, 2018 3:05 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: thomas@monjalon.net; dev@dpdk.org; Lin, Xueqin <xueqin.lin@intel.com>
> Subject: Re: [PATCH v3 1/2] net/pcap: move pcap handler to process private
> 
> On 11/14/2018 7:56 PM, Qi Zhang wrote:
> > This is prework for data path enabling for secondary process.
> > To prevent the pcap handler opened by one process from being overwritten
> > by another process, each process should have its own private copy;
> > `rte_eth_dev->process_private` is exactly what we need.
> >
> > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> 
> <...>
> 
> > @@ -646,8 +652,10 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
> >  		struct rte_mempool *mb_pool)
> >  {
> >  	struct pmd_internals *internals = dev->data->dev_private;
> > +	struct pmd_process_private *pp = dev->process_private;
> >  	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
> >
> > +	pcap_q->pcap = pp->rx_pcap[rx_queue_id];
> >  	pcap_q->mb_pool = mb_pool;
> >  	dev->data->rx_queues[rx_queue_id] = pcap_q;
> >  	pcap_q->in_port = dev->data->port_id; @@ -663,8 +671,12 @@
> > eth_tx_queue_setup(struct rte_eth_dev *dev,
> >  		const struct rte_eth_txconf *tx_conf __rte_unused)  {
> >  	struct pmd_internals *internals = dev->data->dev_private;
> > +	struct pmd_process_private *pp = dev->process_private;
> > +	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
> >
> > -	dev->data->tx_queues[tx_queue_id] =
> &internals->tx_queue[tx_queue_id];
> > +	pcap_q->pcap = pp->tx_pcap[tx_queue_id];
> > +	pcap_q->dumper = pp->tx_dumper[tx_queue_id];
> > +	dev->data->tx_queues[tx_queue_id] = pcap_q;
> 
> We can't do this; it would be the same as not using process_private at all,
> because dev->data is shared between the primary and secondary processes.
> The handlers need to be accessed directly from process_private in the burst
> functions. To be able to match a queue with its handlers in process_private,
> the queue may need to include a queue_index.
> 
> tap pmd has similar implementation already.

I don't know why the idea came to my mind that
a queue must be set up in the local process before the data path is started on
that same process, but obviously that is wrong.
Will fix this.
  

Patch

diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 7bbe72e25..88cb404f1 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -83,6 +83,12 @@  struct pmd_internals {
 	int phy_mac;
 };
 
+struct pmd_process_private {
+	pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
+	pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
+	pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
+};
+
 struct pmd_devargs {
 	unsigned int num_of_queue;
 	struct devargs_queue {
@@ -646,8 +652,10 @@  eth_rx_queue_setup(struct rte_eth_dev *dev,
 		struct rte_mempool *mb_pool)
 {
 	struct pmd_internals *internals = dev->data->dev_private;
+	struct pmd_process_private *pp = dev->process_private;
 	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
 
+	pcap_q->pcap = pp->rx_pcap[rx_queue_id];
 	pcap_q->mb_pool = mb_pool;
 	dev->data->rx_queues[rx_queue_id] = pcap_q;
 	pcap_q->in_port = dev->data->port_id;
@@ -663,8 +671,12 @@  eth_tx_queue_setup(struct rte_eth_dev *dev,
 		const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct pmd_internals *internals = dev->data->dev_private;
+	struct pmd_process_private *pp = dev->process_private;
+	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
 
-	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+	pcap_q->pcap = pp->tx_pcap[tx_queue_id];
+	pcap_q->dumper = pp->tx_dumper[tx_queue_id];
+	dev->data->tx_queues[tx_queue_id] = pcap_q;
 
 	return 0;
 }
@@ -896,16 +908,29 @@  pmd_init_internals(struct rte_vdev_device *vdev,
 		struct rte_eth_dev **eth_dev)
 {
 	struct rte_eth_dev_data *data;
+	struct pmd_process_private *pp;
 	unsigned int numa_node = vdev->device.numa_node;
 
 	PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
 		numa_node);
 
+	pp = (struct pmd_process_private *)
+		rte_zmalloc(NULL, sizeof(struct pmd_process_private),
+				RTE_CACHE_LINE_SIZE);
+
+	if (pp == NULL) {
+		PMD_LOG(ERR,
+			"Failed to allocate memory for process private");
+		return -1;
+	}
+
 	/* reserve an ethdev entry */
 	*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
-	if (!(*eth_dev))
+	if (!(*eth_dev)) {
+		rte_free(pp);
 		return -1;
-
+	}
+	(*eth_dev)->process_private = pp;
 	/* now put it all together
 	 * - store queue data in internals,
 	 * - store numa_node info in eth_dev
@@ -1027,6 +1052,7 @@  eth_from_pcaps_common(struct rte_vdev_device *vdev,
 		struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
 		struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
 {
+	struct pmd_process_private *pp;
 	unsigned int i;
 
 	/* do some parameter checking */
@@ -1039,11 +1065,12 @@  eth_from_pcaps_common(struct rte_vdev_device *vdev,
 			eth_dev) < 0)
 		return -1;
 
+	pp = (*eth_dev)->process_private;
 	for (i = 0; i < nb_rx_queues; i++) {
 		struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
 		struct devargs_queue *queue = &rx_queues->queue[i];
 
-		rx->pcap = queue->pcap;
+		pp->rx_pcap[i] = queue->pcap;
 		snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
 		snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
 	}
@@ -1052,8 +1079,8 @@  eth_from_pcaps_common(struct rte_vdev_device *vdev,
 		struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
 		struct devargs_queue *queue = &tx_queues->queue[i];
 
-		tx->dumper = queue->dumper;
-		tx->pcap = queue->pcap;
+		pp->tx_dumper[i] = queue->dumper;
+		pp->tx_pcap[i] = queue->pcap;
 		snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
 		snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
 	}
@@ -1235,6 +1262,7 @@  pmd_pcap_remove(struct rte_vdev_device *dev)
 			eth_dev->data->mac_addrs = NULL;
 	}
 
+	rte_free(eth_dev->process_private);
 	rte_eth_dev_release_port(eth_dev);
 
 	return 0;