[3/3] net/pcap: fix concurrent multiseg packet transmits

Message ID: 1563969270-29669-4-git-send-email-david.marchand@redhat.com (mailing list archive)
State: Superseded, archived
Delegated to: Ferruh Yigit
Series: Multiseg fixes for pcap pmd

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  success  Compilation OK

Commit Message

David Marchand July 24, 2019, 11:54 a.m. UTC
  Two cores can send multi segment packets on two different pcap ports.
Because of this, we can't have one single buffer to linearize packets.

Use rte_pktmbuf_read() to copy the packet into a buffer on the stack
and remove eth_pcap_gather_data().

Fixes: 6db141c91e1f ("pcap: support jumbo frames")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/net/pcap/rte_eth_pcap.c | 90 +++++++++++++++--------------------------
 1 file changed, 32 insertions(+), 58 deletions(-)
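
In short, the fix removes the file-scope tx_pcap_data buffer and gives each
TX burst a scratch buffer on its own stack, so concurrent bursts on different
pcap ports no longer write to shared memory. A condensed sketch of the
resulting per-packet logic, assuming it lives in rte_eth_pcap.c where
RTE_ETH_PCAP_SNAPLEN and the mbuf helpers are in scope (the helper
pkt_tx_data() is only for illustration and is not part of the patch):

static const unsigned char *
pkt_tx_data(struct rte_mbuf *mbuf, unsigned char *scratch, size_t scratch_len,
	    size_t *len)
{
	*len = rte_pktmbuf_pkt_len(mbuf);

	/* Single-segment packet: point straight at the mbuf data. */
	if (likely(rte_pktmbuf_is_contiguous(mbuf)))
		return rte_pktmbuf_mtod(mbuf, unsigned char *);
	/* Multi-segment packet: linearize it into the caller's buffer. */
	if (*len <= scratch_len)
		return rte_pktmbuf_read(mbuf, 0, *len, scratch);
	/* Oversized packet: the caller logs and drops it. */
	return NULL;
}

Both eth_pcap_tx_dumper() and eth_pcap_tx() then declare a local
unsigned char _data[RTE_ETH_PCAP_SNAPLEN] and follow this pattern, as the
diff below shows.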
  

Comments

Ferruh Yigit July 24, 2019, 6:43 p.m. UTC | #1
On 7/24/2019 12:54 PM, David Marchand wrote:
> Two cores can send multi segment packets on two different pcap ports.
> Because of this, we can't have one single buffer to linearize packets.

+1, you are right.

> 
> Use rte_pktmbuf_read() to copy the packet into a buffer on the stack
> and remove eth_pcap_gather_data().

+1

> 
> Fixes: 6db141c91e1f ("pcap: support jumbo frames")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>

Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
  
David Marchand July 25, 2019, 8:18 a.m. UTC | #2
On Wed, Jul 24, 2019 at 1:55 PM David Marchand
<david.marchand@redhat.com> wrote:
>
> Two cores can send multi segment packets on two different pcap ports.
> Because of this, we can't have one single buffer to linearize packets.
>
> Use rte_pktmbuf_read() to copy the packet into a buffer on the stack
> and remove eth_pcap_gather_data().
>
> Fixes: 6db141c91e1f ("pcap: support jumbo frames")
> Cc: stable@dpdk.org
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
>  drivers/net/pcap/rte_eth_pcap.c | 90 +++++++++++++++--------------------------
>  1 file changed, 32 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
> index 5e5aab7..7398b1b 100644
> --- a/drivers/net/pcap/rte_eth_pcap.c
> +++ b/drivers/net/pcap/rte_eth_pcap.c

[snip]

> @@ -336,31 +323,25 @@ struct pmd_devargs_all {
>          * dumper */
>         for (i = 0; i < nb_pkts; i++) {
>                 mbuf = bufs[i];
> -               calculate_timestamp(&header.ts);
> -               header.len = mbuf->pkt_len;
> -               header.caplen = header.len;
> -
> -               if (likely(mbuf->nb_segs == 1)) {
> -                       pcap_dump((u_char *)dumper, &header,
> -                                 rte_pktmbuf_mtod(mbuf, void*));
> +               len = rte_pktmbuf_pkt_len(mbuf);
> +               if (likely(rte_pktmbuf_is_contiguous(mbuf))) {
> +                       data = rte_pktmbuf_mtod(mbuf, unsigned char *);
> +               } else if (len <= sizeof(_data)) {
> +                       data = rte_pktmbuf_read(mbuf, 0, len, _data);

We can actually skip the check on contiguous data, since
rte_pktmbuf_read returns a pointer to the mbuf data without copying.
WDYT ?


--
David Marchand
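
For context, the behaviour being relied on above: rte_pktmbuf_read() returns
a pointer straight into the mbuf when the requested range already sits in the
first segment, and copies into the caller's buffer only when the range spans
segments. A small sketch of those semantics (show_read_semantics() is a
hypothetical illustration, not driver code); note the length check has to
stay either way, because rte_pktmbuf_read() does not know how large the
scratch buffer is:

static void
show_read_semantics(struct rte_mbuf *mbuf, unsigned char *scratch,
		    uint32_t scratch_len)
{
	uint32_t len = rte_pktmbuf_pkt_len(mbuf);
	const void *data;

	/* Caller-side length check: rte_pktmbuf_read() would overrun a
	 * too-small buffer, it cannot check the size itself. */
	if (len > scratch_len)
		return;

	data = rte_pktmbuf_read(mbuf, 0, len, scratch);
	if (rte_pktmbuf_is_contiguous(mbuf))
		/* Contiguous packet: direct pointer, nothing was copied. */
		RTE_ASSERT(data == rte_pktmbuf_mtod(mbuf, void *));
	else
		/* Multi-segment packet: data was gathered into 'scratch'. */
		RTE_ASSERT(data == scratch);
}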
  
Ferruh Yigit July 25, 2019, 11:07 a.m. UTC | #3
On 7/25/2019 9:18 AM, David Marchand wrote:
> On Wed, Jul 24, 2019 at 1:55 PM David Marchand
> <david.marchand@redhat.com> wrote:
>>
>> Two cores can send multi segment packets on two different pcap ports.
>> Because of this, we can't have one single buffer to linearize packets.
>>
>> Use rte_pktmbuf_read() to copy the packet into a buffer on the stack
>> and remove eth_pcap_gather_data().
>>
>> Fixes: 6db141c91e1f ("pcap: support jumbo frames")
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: David Marchand <david.marchand@redhat.com>
>> ---
>>  drivers/net/pcap/rte_eth_pcap.c | 90 +++++++++++++++--------------------------
>>  1 file changed, 32 insertions(+), 58 deletions(-)
>>
>> diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
>> index 5e5aab7..7398b1b 100644
>> --- a/drivers/net/pcap/rte_eth_pcap.c
>> +++ b/drivers/net/pcap/rte_eth_pcap.c
> 
> [snip]
> 
>> @@ -336,31 +323,25 @@ struct pmd_devargs_all {
>>          * dumper */
>>         for (i = 0; i < nb_pkts; i++) {
>>                 mbuf = bufs[i];
>> -               calculate_timestamp(&header.ts);
>> -               header.len = mbuf->pkt_len;
>> -               header.caplen = header.len;
>> -
>> -               if (likely(mbuf->nb_segs == 1)) {
>> -                       pcap_dump((u_char *)dumper, &header,
>> -                                 rte_pktmbuf_mtod(mbuf, void*));
>> +               len = rte_pktmbuf_pkt_len(mbuf);
>> +               if (likely(rte_pktmbuf_is_contiguous(mbuf))) {
>> +                       data = rte_pktmbuf_mtod(mbuf, unsigned char *);
>> +               } else if (len <= sizeof(_data)) {
>> +                       data = rte_pktmbuf_read(mbuf, 0, len, _data);
> 
> We can actually skip the check on contiguous data, since
> rte_pktmbuf_read returns a pointer to the mbuf data without copying.
> WDYT ?

Right, +1 to skipping 'rte_pktmbuf_is_contiguous()'; only it would be good to
comment the 'rte_pktmbuf_read()' usage to say it covers multi-segment packets too.
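
Applied to eth_pcap_tx() from the diff below, the simplification plus the
requested comment could look roughly like this (a sketch of a possible next
revision, not the code in this version of the patch):

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		len = rte_pktmbuf_pkt_len(mbuf);
		if (unlikely(len > sizeof(_data))) {
			PMD_LOG(ERR,
				"Dropping PCAP packet. Size (%zd) > max size (%zd).",
				len, sizeof(_data));
			rte_pktmbuf_free(mbuf);
			continue;
		}

		/* rte_pktmbuf_read() returns a pointer into the mbuf for
		 * contiguous packets and linearizes multi-segment packets
		 * into the stack buffer '_data'. */
		data = rte_pktmbuf_read(mbuf, 0, len, _data);

		if (pcap_sendpacket(pcap, data, len) == 0) {
			num_tx++;
			tx_bytes += len;
		}
		rte_pktmbuf_free(mbuf);
	}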
  

Patch

diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 5e5aab7..7398b1b 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -46,7 +46,6 @@ 
 #define RTE_PMD_PCAP_MAX_QUEUES 16
 
 static char errbuf[PCAP_ERRBUF_SIZE];
-static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
 static struct timeval start_time;
 static uint64_t start_cycles;
 static uint64_t hz;
@@ -180,21 +179,6 @@  struct pmd_devargs_all {
 	return mbuf->nb_segs;
 }
 
-/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
-static void
-eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
-{
-	uint16_t data_len = 0;
-
-	while (mbuf) {
-		rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
-			mbuf->data_len);
-
-		data_len += mbuf->data_len;
-		mbuf = mbuf->next;
-	}
-}
-
 static uint16_t
 eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
@@ -325,6 +309,9 @@  struct pmd_devargs_all {
 	uint32_t tx_bytes = 0;
 	struct pcap_pkthdr header;
 	pcap_dumper_t *dumper;
+	unsigned char _data[RTE_ETH_PCAP_SNAPLEN];
+	const unsigned char *data;
+	size_t len;
 
 	pp = rte_eth_devices[dumper_q->port_id].process_private;
 	dumper = pp->tx_dumper[dumper_q->queue_id];
@@ -336,31 +323,25 @@  struct pmd_devargs_all {
 	 * dumper */
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
-		calculate_timestamp(&header.ts);
-		header.len = mbuf->pkt_len;
-		header.caplen = header.len;
-
-		if (likely(mbuf->nb_segs == 1)) {
-			pcap_dump((u_char *)dumper, &header,
-				  rte_pktmbuf_mtod(mbuf, void*));
+		len = rte_pktmbuf_pkt_len(mbuf);
+		if (likely(rte_pktmbuf_is_contiguous(mbuf))) {
+			data = rte_pktmbuf_mtod(mbuf, unsigned char *);
+		} else if (len <= sizeof(_data)) {
+			data = rte_pktmbuf_read(mbuf, 0, len, _data);
 		} else {
-			if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
-				eth_pcap_gather_data(tx_pcap_data, mbuf);
-				pcap_dump((u_char *)dumper, &header,
-					  tx_pcap_data);
-			} else {
-				PMD_LOG(ERR,
-					"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
-					mbuf->pkt_len,
-					RTE_ETHER_MAX_JUMBO_FRAME_LEN);
-
-				rte_pktmbuf_free(mbuf);
-				continue;
-			}
+			PMD_LOG(ERR, "Dropping PCAP packet. Size (%zd) > max size (%zd).",
+				len, sizeof(_data));
+			rte_pktmbuf_free(mbuf);
+			continue;
 		}
 
+		calculate_timestamp(&header.ts);
+		header.len = len;
+		header.caplen = header.len;
+		pcap_dump((u_char *)dumper, &header, data);
+
 		num_tx++;
-		tx_bytes += mbuf->pkt_len;
+		tx_bytes += len;
 		rte_pktmbuf_free(mbuf);
 	}
 
@@ -408,13 +389,15 @@  struct pmd_devargs_all {
 eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	unsigned int i;
-	int ret;
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *tx_queue = queue;
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	pcap_t *pcap;
+	unsigned char _data[RTE_ETH_PCAP_SNAPLEN];
+	const unsigned char *data;
+	size_t len;
 
 	pp = rte_eth_devices[tx_queue->port_id].process_private;
 	pcap = pp->tx_pcap[tx_queue->queue_id];
@@ -424,30 +407,21 @@  struct pmd_devargs_all {
 
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
-
-		if (likely(mbuf->nb_segs == 1)) {
-			ret = pcap_sendpacket(pcap,
-					rte_pktmbuf_mtod(mbuf, u_char *),
-					mbuf->pkt_len);
+		len = rte_pktmbuf_pkt_len(mbuf);
+		if (likely(rte_pktmbuf_is_contiguous(mbuf))) {
+			data = rte_pktmbuf_mtod(mbuf, unsigned char *);
+		} else if (len <= sizeof(_data)) {
+			data = rte_pktmbuf_read(mbuf, 0, len, _data);
 		} else {
-			if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
-				eth_pcap_gather_data(tx_pcap_data, mbuf);
-				ret = pcap_sendpacket(pcap,
-						tx_pcap_data, mbuf->pkt_len);
-			} else {
-				PMD_LOG(ERR,
-					"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
-					mbuf->pkt_len,
-					RTE_ETHER_MAX_JUMBO_FRAME_LEN);
-
-				rte_pktmbuf_free(mbuf);
-				continue;
-			}
+			PMD_LOG(ERR, "Dropping PCAP packet. Size (%zd) > max size (%zd).",
+				len, sizeof(_data));
+			rte_pktmbuf_free(mbuf);
+			continue;
 		}
 
-		if (ret == 0) {
+		if (pcap_sendpacket(pcap, data, len) == 0) {
 			num_tx++;
-			tx_bytes += mbuf->pkt_len;
+			tx_bytes += len;
 		}
 		rte_pktmbuf_free(mbuf);
 	}