[dpdk-dev,v2,1/3] net/octeontx: add support for event Rx adapter

Message ID 1508316342-17781-1-git-send-email-pbhagavatula@caviumnetworks.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail apply patch file failure

Commit Message

Pavan Nikhilesh Oct. 18, 2017, 8:45 a.m. UTC
  Add functions to modify and delete the qos entries responsible for mapping
eth queues to event queues; these are used when configuring the event Rx
adapter.
The mbox functions have been moved from octeontx_pkivf.c to
octeontx_pkivf.h to allow event_octeontx to access them.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---

 v2 changes:
 - Improve conditional statement readability (Nikhil).

 This series depends on http://dpdk.org/dev/patchwork/patch/30430
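
 A minimal usage sketch of the two new qos mbox helpers this patch adds to
 octeontx_pkivf.h; the wrapper function rework_qos_entry, the entry index,
 and the tag_type value are illustrative assumptions, not part of the patch:

	#include <string.h>
	#include "octeontx_pkivf.h"

	/* Hypothetical helper: retarget qos entry 0 of a port, then
	 * remove it again, via the new mbox calls. */
	static int
	rework_qos_entry(int port)
	{
		pki_mod_qos_t mod_cfg;
		pki_del_qos_t del_cfg;
		int rc;

		memset(&mod_cfg, 0, sizeof(mod_cfg));
		mod_cfg.port_type = OCTTX_PORT_TYPE_NET;
		mod_cfg.index = 0;            /* qos entry to modify */
		mod_cfg.mmask.f_tag_type = 1; /* touch only the tag type */
		mod_cfg.tag_type = 0;         /* assumed tag type value */
		rc = octeontx_pki_port_modify_qos(port, &mod_cfg);
		if (rc < 0)
			return rc;            /* -EACCES on mbox failure */

		memset(&del_cfg, 0, sizeof(del_cfg));
		del_cfg.port_type = OCTTX_PORT_TYPE_NET;
		del_cfg.index = 0;            /* qos entry to delete */
		return octeontx_pki_port_delete_qos(port, &del_cfg);
	}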

 drivers/net/octeontx/base/octeontx_pkivf.c |  65 ---------------
 drivers/net/octeontx/base/octeontx_pkivf.h | 126 ++++++++++++++++++++++++++++-
 drivers/net/octeontx/octeontx_ethdev.c     |   3 +-
 drivers/net/octeontx/octeontx_rxtx.c       | 108 +------------------------
 drivers/net/octeontx/octeontx_rxtx.h       |  89 ++++++++++++++++++++
 5 files changed, 216 insertions(+), 175 deletions(-)

--
2.7.4
  

Comments

Jerin Jacob Oct. 23, 2017, 6:09 p.m. UTC | #1
-----Original Message-----
> Date: Wed, 18 Oct 2017 14:15:40 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
>  nikhil.rao@intel.com
> Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v2 1/3] net/octeontx: add support for event Rx
>  adapter
> X-Mailer: git-send-email 2.7.4
> 
> Add functions to modify and delete the qos entries responsible for mapping
> eth queues to event queues; these are used when configuring the event Rx
> adapter.
> The mbox functions have been moved from octeontx_pkivf.c to
> octeontx_pkivf.h to allow event_octeontx to access them.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
> 
>  v2 changes:
>  - Improve conditional statement readability (Nikhil).
> 
>  This series depends on http://dpdk.org/dev/patchwork/patch/30430
> 
> 
>  static __rte_always_inline uint16_t __hot
>  __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
> @@ -195,10 +107,8 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  uint16_t __hot
>  octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  {
> -	struct rte_mbuf *mbuf;
>  	struct octeontx_rxq *rxq;
>  	struct rte_event ev;
> -	octtx_wqe_t *wqe;
>  	size_t count;
>  	uint16_t valid_event;
> 
> @@ -210,23 +120,7 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  							1, 0);
>  		if (!valid_event)
>  			break;
> -
> -		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
> -		rte_prefetch_non_temporal(wqe);
> -
> -		/* Get mbuf from wqe */
> -		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
> -						OCTTX_PACKET_WQE_SKIP);
> -		mbuf->packet_type =
> -		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
> -		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
> -		mbuf->pkt_len = wqe->s.w1.len;
> -		mbuf->data_len = mbuf->pkt_len;
> -		mbuf->nb_segs = 1;
> -		mbuf->ol_flags = 0;
> -		mbuf->port = rxq->port_id;

IMO, you don't need to move the mbuf conversion inline function to 2/3.
Instead, if we do it in 1/3, the patch stays functionally correct at 1/3.
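
A minimal sketch of keeping that conversion in 1/3 as an inline helper,
built from the removed lines above (the helper name octeontx_wqe_to_mbuf
is illustrative):

	static __rte_always_inline struct rte_mbuf *
	octeontx_wqe_to_mbuf(octtx_wqe_t *wqe, uint16_t port_id)
	{
		struct rte_mbuf *mbuf;

		rte_prefetch_non_temporal(wqe);
		/* The mbuf header sits OCTTX_PACKET_WQE_SKIP bytes
		 * before the wqe. */
		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
						OCTTX_PACKET_WQE_SKIP);
		mbuf->packet_type =
		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
		mbuf->pkt_len = wqe->s.w1.len;
		mbuf->data_len = mbuf->pkt_len;
		mbuf->nb_segs = 1;
		mbuf->ol_flags = 0;
		mbuf->port = port_id;
		rte_mbuf_refcnt_set(mbuf, 1);
		return mbuf;
	}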

> -		rte_mbuf_refcnt_set(mbuf, 1);
> -		rx_pkts[count++] = mbuf;
> +		rx_pkts[count++] = (struct rte_mbuf *)ev.u64;

This will create a build error on 32-bit. You can avoid the typecast by
changing it to ev.mbuf.
/export/dpdk-next-eventdev/drivers/net/octeontx/octeontx_rxtx.c: In
function ‘octeontx_recv_pkts’:
/export/dpdk-next-eventdev/drivers/net/octeontx/octeontx_rxtx.c:123:22:
error: cast to pointer from integer of different size
[-Werror=int-to-pointer-cast]
   rx_pkts[count++] = (struct rte_mbuf *)ev.u64;                                
                      ^                                                         
cc1: all warnings being treated as errors
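
A minimal sketch of the suggested fix (struct rte_event carries the mbuf
pointer in its union, so the assignment needs no cast and stays correct
on 32-bit):

		if (!valid_event)
			break;
		rx_pkts[count++] = ev.mbuf;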
  
Pavan Nikhilesh Oct. 24, 2017, 6:56 a.m. UTC | #2
On Mon, Oct 23, 2017 at 11:39:42PM +0530, Jerin Jacob wrote:
> -----Original Message-----
> > Date: Wed, 18 Oct 2017 14:15:40 +0530
> > From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
> >  nikhil.rao@intel.com
> > Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > Subject: [dpdk-dev] [PATCH v2 1/3] net/octeontx: add support for event Rx
> >  adapter
> > X-Mailer: git-send-email 2.7.4
> >
> > Add functions to modify and delete the qos entries responsible for mapping
> > eth queues to event queues; these are used when configuring the event Rx
> > adapter.
> > The mbox functions have been moved from octeontx_pkivf.c to
> > octeontx_pkivf.h to allow event_octeontx to access them.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > ---
> >
> >  v2 changes:
> >  - Improve conditional statement readability (Nikhil).
> >
> >  This series depends on http://dpdk.org/dev/patchwork/patch/30430
> >
> >
> >  static __rte_always_inline uint16_t __hot
> >  __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
> > @@ -195,10 +107,8 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> >  uint16_t __hot
> >  octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> >  {
> > -	struct rte_mbuf *mbuf;
> >  	struct octeontx_rxq *rxq;
> >  	struct rte_event ev;
> > -	octtx_wqe_t *wqe;
> >  	size_t count;
> >  	uint16_t valid_event;
> >
> > @@ -210,23 +120,7 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> >  							1, 0);
> >  		if (!valid_event)
> >  			break;
> > -
> > -		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
> > -		rte_prefetch_non_temporal(wqe);
> > -
> > -		/* Get mbuf from wqe */
> > -		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
> > -						OCTTX_PACKET_WQE_SKIP);
> > -		mbuf->packet_type =
> > -		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
> > -		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
> > -		mbuf->pkt_len = wqe->s.w1.len;
> > -		mbuf->data_len = mbuf->pkt_len;
> > -		mbuf->nb_segs = 1;
> > -		mbuf->ol_flags = 0;
> > -		mbuf->port = rxq->port_id;
>
> IMO, you don't need to move the mbuf conversion inline function to 2/3.
> Instead, if we do it in 1/3, the patch stays functionally correct at 1/3.
>
> > -		rte_mbuf_refcnt_set(mbuf, 1);
> > -		rx_pkts[count++] = mbuf;
> > +		rx_pkts[count++] = (struct rte_mbuf *)ev.u64;
>
> This will create a build error on 32-bit. You can avoid the typecast by
> changing it to ev.mbuf.
> /export/dpdk-next-eventdev/drivers/net/octeontx/octeontx_rxtx.c: In
> function ‘octeontx_recv_pkts’:
> /export/dpdk-next-eventdev/drivers/net/octeontx/octeontx_rxtx.c:123:22:
> error: cast to pointer from integer of different size
> [-Werror=int-to-pointer-cast]
>    rx_pkts[count++] = (struct rte_mbuf *)ev.u64;
>                       ^
> cc1: all warnings being treated as errors

Agreed, I will move the mbuf conversion to 1/3, fix the 32-bit compilation
issue, and send a v3.

Thanks,
Pavan
  

Patch

diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index afae6a3..f9e4053 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -111,71 +111,6 @@  octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
 	return res;
 }

-int
-octeontx_pki_port_close(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_CLOSE;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_start(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_START;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_stop(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_STOP;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}

 int
 octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 7cf8332..004dedc 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -240,10 +240,18 @@  typedef struct mbox_pki_port_modify_qos_entry {
 		uint8_t f_gaura:1;
 		uint8_t f_grptag_ok:1;
 		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct mbox_pki_qos_entry qos_entry;
 } mbox_pki_mod_qos_t;

+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} mbox_pki_del_qos_t;
+
 /* PKI maximum constants */
 #define PKI_VF_MAX			(1)
 #define PKI_MAX_PKTLEN			(32768)
@@ -407,6 +415,12 @@  typedef struct pki_port_create_qos {
 } pki_qos_cfg_t;

 /* pki flow/style enable qos */
+typedef struct pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} pki_del_qos_t;
+
+/* pki flow/style enable qos */
 typedef struct pki_port_modify_qos_entry {
 	uint8_t port_type;
 	uint16_t index;
@@ -415,17 +429,125 @@  typedef struct pki_port_modify_qos_entry {
 		uint8_t f_grp_ok:1;
 		uint8_t f_grp_bad:1;
 		uint8_t f_gaura:1;
+		uint8_t f_grptag_ok:1;
+		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct pki_qos_entry qos_entry;
 } pki_mod_qos_t;

+static inline int
+octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_mod_qos_t q_cfg = *(mbox_pki_mod_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_mod_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_del_qos_t q_cfg = *(mbox_pki_del_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_del_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_close(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_CLOSE;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_start(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_START;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_stop(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_STOP;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
 int octeontx_pki_port_open(int port);
 int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg);
 int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg);
 int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
 int octeontx_pki_port_close(int port);
-int octeontx_pki_port_start(int port);
-int octeontx_pki_port_stop(int port);
 int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);

 #endif /* __OCTEONTX_PKI_H__ */
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 82e38c2..86de5d1 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -930,6 +930,7 @@  octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			pki_hash.tag_slc = 1;
 			pki_hash.tag_dlf = 1;
 			pki_hash.tag_slf = 1;
+			pki_hash.tag_prt = 1;
 			octeontx_pki_port_hash_config(port, &pki_hash);
 		}

@@ -941,7 +942,7 @@  octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
 		pki_qos.num_entry = 1;
 		pki_qos.drop_policy = 0;
-		pki_qos.tag_type = 2L;
+		pki_qos.tag_type = 0L;
 		pki_qos.qos_entry[0].port_add = 0;
 		pki_qos.qos_entry[0].gaura = gaura;
 		pki_qos.qos_entry[0].ggrp_ok = ev_queues;
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 0b15833..99531cd 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -47,94 +47,6 @@ 
 #include "octeontx_rxtx.h"
 #include "octeontx_logs.h"

-/* Packet type table */
-#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
-
-static const uint32_t __rte_cache_aligned
-ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
-	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
-	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
-	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
-	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
-	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
-	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
-	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
-	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
-	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-};

 static __rte_always_inline uint16_t __hot
 __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
@@ -195,10 +107,8 @@  octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 uint16_t __hot
 octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-	struct rte_mbuf *mbuf;
 	struct octeontx_rxq *rxq;
 	struct rte_event ev;
-	octtx_wqe_t *wqe;
 	size_t count;
 	uint16_t valid_event;

@@ -210,23 +120,7 @@  octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 							1, 0);
 		if (!valid_event)
 			break;
-
-		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
-		rte_prefetch_non_temporal(wqe);
-
-		/* Get mbuf from wqe */
-		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
-						OCTTX_PACKET_WQE_SKIP);
-		mbuf->packet_type =
-		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
-		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
-		mbuf->pkt_len = wqe->s.w1.len;
-		mbuf->data_len = mbuf->pkt_len;
-		mbuf->nb_segs = 1;
-		mbuf->ol_flags = 0;
-		mbuf->port = rxq->port_id;
-		rte_mbuf_refcnt_set(mbuf, 1);
-		rx_pkts[count++] = mbuf;
+		rx_pkts[count++] = (struct rte_mbuf *)ev.u64;
 	}

 	return count; /* return number of pkts received */
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 1f91532..382ff2b 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -39,6 +39,95 @@ 
 #define __hot	__attribute__((hot))
 #endif

+/* Packet type table */
+#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
+
+static const uint32_t __rte_cache_aligned
+ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
+	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
+	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
+	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
+	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
+	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+};
+
 uint16_t
 octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);