[dpdk-dev,35/38] net/dpaa: add support for packet type parsing

Message ID 1497591668-3320-36-git-send-email-shreyansh.jain@nxp.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon

Checks

Context              | Check   | Description
ci/checkpatch        | warning | coding style issues
ci/Intel-compilation | success | Compilation OK

Commit Message

Shreyansh Jain June 16, 2017, 5:41 a.m. UTC
  Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
---
 doc/guides/nics/features/dpaa.ini |   1 +
 drivers/net/dpaa/dpaa_ethdev.c    |  26 ++++++
 drivers/net/dpaa/dpaa_rxtx.c      | 111 ++++++++++++++++++++++++
 drivers/net/dpaa/dpaa_rxtx.h      | 174 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 312 insertions(+)
  

Comments

Ferruh Yigit June 28, 2017, 3:50 p.m. UTC | #1
On 6/16/2017 6:41 AM, Shreyansh Jain wrote:
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>

<...>

> +static const uint32_t *
> +dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
> +{
> +	static const uint32_t ptypes[] = {
> +		/*todo -= add more types */
> +		RTE_PTYPE_L2_ETHER,
> +		RTE_PTYPE_L3_IPV4,
> +		RTE_PTYPE_L3_IPV4_EXT,
> +		RTE_PTYPE_L3_IPV6,
> +		RTE_PTYPE_L3_IPV6_EXT,
> +		RTE_PTYPE_L4_TCP,
> +		RTE_PTYPE_L4_UDP,
> +		RTE_PTYPE_L4_SCTP
> +	};
> +
> +	PMD_INIT_FUNC_TRACE();
> +
> +	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)

Isn't this the only rx function that exists? Is this check required?

> +		return ptypes;
> +	return NULL;
> +}
>  
>  static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
>  {
> @@ -159,6 +180,10 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
>  	dev_info->max_vfs = 0;
>  	dev_info->max_vmdq_pools = ETH_16_POOLS;
>  	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
> +	dev_info->rx_offload_capa =
> +		(DEV_RX_OFFLOAD_IPV4_CKSUM |
> +		DEV_RX_OFFLOAD_UDP_CKSUM  |
> +		DEV_RX_OFFLOAD_TCP_CKSUM);

I guess this patch also enables L3/L4 Rx checksum offload; can you please
update the commit log?

And should ol_flags be set with one of PKT_RX_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD, or PKT_RX_IP_CKSUM_NONE? Also with the L4 versions of these?

<...>

> +
> +	m->tx_offload = annot->parse.ip_off[0];
> +	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
> +					<< DPAA_PKT_L3_LEN_SHIFT;

This is a received mbuf, right? Is it required to set the tx_offload field?

> +
> +	/* Set the hash values */
> +	m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
> +	m->ol_flags = PKT_RX_RSS_HASH;
> +
> +	/* Check if Vlan is present */
> +	if (prs & DPAA_PARSE_VLAN_MASK)
> +		m->ol_flags |= PKT_RX_VLAN_PKT;

I guess PKT_RX_VLAN_STRIPPED is the preferred flag now.

<...>
  
Shreyansh Jain June 30, 2017, 11:40 a.m. UTC | #2
On Wednesday 28 June 2017 09:20 PM, Ferruh Yigit wrote:
> On 6/16/2017 6:41 AM, Shreyansh Jain wrote:
>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>> Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
> 
> <...>
> 
>> +static const uint32_t *
>> +dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
>> +{
>> +	static const uint32_t ptypes[] = {
>> +		/*todo -= add more types */
>> +		RTE_PTYPE_L2_ETHER,
>> +		RTE_PTYPE_L3_IPV4,
>> +		RTE_PTYPE_L3_IPV4_EXT,
>> +		RTE_PTYPE_L3_IPV6,
>> +		RTE_PTYPE_L3_IPV6_EXT,
>> +		RTE_PTYPE_L4_TCP,
>> +		RTE_PTYPE_L4_UDP,
>> +		RTE_PTYPE_L4_SCTP
>> +	};
>> +
>> +	PMD_INIT_FUNC_TRACE();
>> +
>> +	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
> 
> Isn't this the only rx function that exists? Is this check required?

Yes, for now we only have a single Rx function. But, just like other drivers, we may add more in the near future based on some variation of Rx.
In fact, this is mostly to be in sync with how other drivers implement this function (albeit we currently have only a single Rx variant).
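
For reference, a minimal sketch of how an application consumes this op through the ethdev API of this DPDK generation (port_id and the array size here are assumptions):

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static void
print_port_ptypes(uint8_t port_id)
{
	uint32_t ptypes[16];
	int i, n;

	/* Intersects RTE_PTYPE_ALL_MASK with what the PMD reports; if the
	 * PMD op returns NULL (Rx burst mismatch above), nothing is listed. */
	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
					     ptypes, RTE_DIM(ptypes));
	for (i = 0; i < n; i++)
		printf("supported ptype: 0x%08x\n", ptypes[i]);
}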

> 
>> +		return ptypes;
>> +	return NULL;
>> +}
>>  
>>  static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
>>  {
>> @@ -159,6 +180,10 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
>>  	dev_info->max_vfs = 0;
>>  	dev_info->max_vmdq_pools = ETH_16_POOLS;
>>  	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
>> +	dev_info->rx_offload_capa =
>> +		(DEV_RX_OFFLOAD_IPV4_CKSUM |
>> +		DEV_RX_OFFLOAD_UDP_CKSUM  |
>> +		DEV_RX_OFFLOAD_TCP_CKSUM);
> 
> I guess this patch also enables L3/L4 Rx checksum offload; can you please
> update the commit log?

Ok. I will do that

> 
> And should ol_flags be set with one of PKT_RX_IP_CKSUM_BAD,
> PKT_RX_IP_CKSUM_GOOD, or PKT_RX_IP_CKSUM_NONE? Also with the L4 versions of these?

Yes. I will fix that.
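
A minimal sketch of the kind of fix being agreed to here, mapping the FMan parse-result bits (see dpaa_eth_parse_results_t in the patch below) to the mbuf Rx flags. The exact semantics assumed for first_info_err and l4_info_err are an assumption, not the merged code:

static inline void
dpaa_eth_cksum_flags(struct rte_mbuf *m,
		     const struct dpaa_eth_parse_results_t *prs)
{
	/* L3: first_info_err is assumed set when the parser flagged an
	 * error on the first IP header it found */
	if (prs->first_ipv4 || prs->first_ipv6)
		m->ol_flags |= prs->first_info_err ?
			PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD;
	/* L4: l4_type is non-zero when TCP/UDP/SCTP was recognized */
	if (prs->l4_type)
		m->ol_flags |= prs->l4_info_err ?
			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
}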

> 
> <...>
> 
>> +
>> +	m->tx_offload = annot->parse.ip_off[0];
>> +	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
>> +					<< DPAA_PKT_L3_LEN_SHIFT;
> 
> This is a received mbuf, right? Is it required to set the tx_offload field?
> 
>> +
>> +	/* Set the hash values */
>> +	m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
>> +	m->ol_flags = PKT_RX_RSS_HASH;
>> +
>> +	/* Check if Vlan is present */
>> +	if (prs & DPAA_PARSE_VLAN_MASK)
>> +		m->ol_flags |= PKT_RX_VLAN_PKT;
> 
> I guess PKT_RX_VLAN_STRIPPED is the preferred flag now.
> 
> <...>
> 

I will re-check the above (and fix).
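
A hedged sketch of the VLAN part of that fix: if the hardware strips the tag (whether DPAA does so on Rx is not established in this thread), the stripped TCI should be reported alongside the flag. Where the TCI lives in the FMan annotation is an assumption, shown as a caller-supplied value:

static inline void
dpaa_eth_vlan_flags(struct rte_mbuf *m, uint64_t prs, uint16_t tci)
{
	if (prs & DPAA_PARSE_VLAN_MASK) {
		m->vlan_tci = tci;	/* hypothetical source */
		m->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
	}
}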
  
Shreyansh Jain July 4, 2017, 12:11 p.m. UTC | #3
On Friday 30 June 2017 05:10 PM, Shreyansh Jain wrote:
> On Wednesday 28 June 2017 09:20 PM, Ferruh Yigit wrote:
>> On 6/16/2017 6:41 AM, Shreyansh Jain wrote:
>>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>>> Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
>>
[...]
>>
>>> +
>>> +	m->tx_offload = annot->parse.ip_off[0];
>>> +	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
>>> +					<< DPAA_PKT_L3_LEN_SHIFT;
>>
>> This is a received mbuf, right? Is it required to set the tx_offload field?
>>
[...]

I had not replied to this in my previous response.
DPAA hardware fills parsed information into the annotation (annot) area
prepended to the frame. When a packet is Rx'd, this area contains
information such as where the IP offset field is. Once we hand the
packet up, the annot area is overwritten in subsequent cycles.

The received packet may then be forwarded, in which case this
information (preserved in m->tx_offload) is useful for optimized
performance.

Forwarding is only one of the cases, but at least some optimization is
achieved by preserving this.
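
For context: m->tx_offload aliases the rte_mbuf l2_len/l3_len/... bitfields, and l2_len occupies the low 7 bits, which is why DPAA_PKT_L3_LEN_SHIFT is 7. So the Rx path above effectively sets l2_len = ip_off[0] and l3_len = l4_off - ip_off[0]. A sketch of how a forwarding path could then use the preserved lengths; the decision to offload the IPv4 checksum is an assumption:

#include <rte_mbuf.h>

static inline void
fwd_prepare_tx_cksum(struct rte_mbuf *m)
{
	/* l2_len/l3_len were already filled from the Rx annotation,
	 * so the IP header can be located without re-parsing. */
	if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
		m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
}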

-
Shreyansh
  

Patch

diff --git a/doc/guides/nics/features/dpaa.ini b/doc/guides/nics/features/dpaa.ini
index adb8458..2e19664 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -14,6 +14,7 @@  Allmulticast mode    = Y
 Unicast MAC filter   = Y
 RSS hash             = Y
 Flow control         = Y
+Packet type parsing  = Y
 Basic stats          = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index fa664d8..4d2bae0 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -112,6 +112,27 @@  dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static const uint32_t *
+dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	static const uint32_t ptypes[] = {
+		/*todo -= add more types */
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV4_EXT,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L3_IPV6_EXT,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_SCTP
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
+		return ptypes;
+	return NULL;
+}
 
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
@@ -159,6 +180,10 @@  static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_vfs = 0;
 	dev_info->max_vmdq_pools = ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
+	dev_info->rx_offload_capa =
+		(DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM  |
+		DEV_RX_OFFLOAD_TCP_CKSUM);
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
@@ -465,6 +490,7 @@  static struct eth_dev_ops dpaa_devops = {
 	.dev_stop		  = dpaa_eth_dev_stop,
 	.dev_close		  = dpaa_eth_dev_close,
 	.dev_infos_get		  = dpaa_eth_dev_info,
+	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,
 
 	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
 	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index d2ef513..e2db3cc 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -85,6 +85,116 @@ 
 		(_fd)->bpid = _bpid; \
 	} while (0)
 
+static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
+				     uint64_t prs __rte_unused)
+{
+	PMD_RX_LOG(DEBUG, " Slow parsing");
+	/*TBD:XXX: to be implemented*/
+}
+
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
+					uint64_t fd_virt_addr)
+{
+	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
+	uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+
+	PMD_RX_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
+
+	switch (prs) {
+	case DPAA_PKT_TYPE_NONE:
+		m->packet_type = 0;
+		break;
+	case DPAA_PKT_TYPE_ETHER:
+		m->packet_type = RTE_PTYPE_L2_ETHER;
+		break;
+	case DPAA_PKT_TYPE_IPV4:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4;
+		break;
+	case DPAA_PKT_TYPE_IPV6:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6;
+		break;
+	case DPAA_PKT_TYPE_IPV4_FRAG:
+	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
+	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
+	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
+		break;
+	case DPAA_PKT_TYPE_IPV6_FRAG:
+	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
+	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
+	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
+		break;
+	case DPAA_PKT_TYPE_IPV4_EXT:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4_EXT;
+		break;
+	case DPAA_PKT_TYPE_IPV6_EXT:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT;
+		break;
+	case DPAA_PKT_TYPE_IPV4_TCP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_TCP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
+		break;
+	case DPAA_PKT_TYPE_IPV4_UDP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_UDP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
+		break;
+	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
+		break;
+	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
+		break;
+	case DPAA_PKT_TYPE_IPV4_SCTP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_SCTP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
+		break;
+	/* More switch cases can be added */
+	default:
+		dpaa_slow_parsing(m, prs);
+	}
+
+	m->tx_offload = annot->parse.ip_off[0];
+	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
+					<< DPAA_PKT_L3_LEN_SHIFT;
+
+	/* Set the hash values */
+	m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
+	m->ol_flags = PKT_RX_RSS_HASH;
+
+	/* Check if Vlan is present */
+	if (prs & DPAA_PARSE_VLAN_MASK)
+		m->ol_flags |= PKT_RX_VLAN_PKT;
+}
+
 static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
 							uint32_t ifid)
 {
@@ -117,6 +227,7 @@  static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
 	mbuf->ol_flags = 0;
 	mbuf->next = NULL;
 	rte_mbuf_refcnt_set(mbuf, 1);
+	dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
 
 	return mbuf;
 }
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
index 09f1aa4..f688934 100644
--- a/drivers/net/dpaa/dpaa_rxtx.h
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -44,6 +44,7 @@ 
 
 #define DPAA_MAX_DEQUEUE_NUM_FRAMES    63
 	/** <Maximum number of frames to be dequeued in a single rx call*/
+
 /* FD structure masks and offset */
 #define DPAA_FD_FORMAT_MASK 0xE0000000
 #define DPAA_FD_OFFSET_MASK 0x1FF00000
@@ -51,6 +52,179 @@ 
 #define DPAA_FD_FORMAT_SHIFT 29
 #define DPAA_FD_OFFSET_SHIFT 20
 
+/* Parsing mask (Little Endian) - 0x00E044ED00800000
+ *	Classification Plan ID 0x00
+ *	L4R 0xE0 -
+ *		0x20 - TCP
+ *		0x40 - UDP
+ *		0x80 - SCTP
+ *	L3R 0xEDC4 (in Big Endian) -
+ *		0x8000 - IPv4
+ *		0x4000 - IPv6
+ *		0x8140 - IPv4 Ext + Frag
+ *		0x8040 - IPv4 Frag
+ *		0x8100 - IPv4 Ext
+ *		0x4140 - IPv6 Ext + Frag
+ *		0x4040 - IPv6 Frag
+ *		0x4100 - IPv6 Ext
+ *	L2R 0x8000 (in Big Endian) -
+ *		0x8000 - Ethernet type
+ *	ShimR & Logical Port ID 0x0000
+ */
+#define DPAA_PARSE_MASK			0x00E044ED00800000
+#define DPAA_PARSE_VLAN_MASK		0x0000000000700000
+
+/* Parsed values (Little Endian) */
+#define DPAA_PKT_TYPE_NONE		0x0000000000000000
+#define DPAA_PKT_TYPE_ETHER		0x0000000000800000
+#define DPAA_PKT_TYPE_IPV4	(0x0000008000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV6	(0x0000004000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_GRE	(0x0000002000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV4_FRAG	(0x0000400000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_FRAG	(0x0000400000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_EXT	(0x0000000100000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_EXT	(0x0000000100000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_TCP	(0x0020000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_TCP	(0x0020000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_UDP	(0x0040000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_UDP	(0x0040000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_SCTP	(0x0080000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_SCTP	(0x0080000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_FRAG_TCP (0x0020000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_TCP (0x0020000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_UDP (0x0040000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_UDP (0x0040000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_SCTP (0x0080000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_SCTP (0x0080000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_EXT_UDP (0x0040000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_UDP (0x0040000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_IPV4_EXT_TCP (0x0020000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_TCP (0x0020000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_TUNNEL_4_4 (0x0000000800000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6 (0x0000000400000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6 (0x0000000400000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_4 (0x0000000800000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_UDP (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_UDP (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_UDP (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_UDP (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_TCP (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_TCP (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_TCP (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_TCP (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_L3_LEN_SHIFT	7
+
+/**
+ * FMan parse result array
+ */
+struct dpaa_eth_parse_results_t {
+	 uint8_t     lpid;		 /**< Logical port id */
+	 uint8_t     shimr;		 /**< Shim header result  */
+	 union {
+		uint16_t              l2r;	/**< Layer 2 result */
+		struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			uint16_t      ethernet:1;
+			uint16_t      vlan:1;
+			uint16_t      llc_snap:1;
+			uint16_t      mpls:1;
+			uint16_t      ppoe_ppp:1;
+			uint16_t      unused_1:3;
+			uint16_t      unknown_eth_proto:1;
+			uint16_t      eth_frame_type:2;
+			uint16_t      l2r_err:5;
+			/*00-unicast, 01-multicast, 11-broadcast*/
+#else
+			uint16_t      l2r_err:5;
+			uint16_t      eth_frame_type:2;
+			uint16_t      unknown_eth_proto:1;
+			uint16_t      unused_1:3;
+			uint16_t      ppoe_ppp:1;
+			uint16_t      mpls:1;
+			uint16_t      llc_snap:1;
+			uint16_t      vlan:1;
+			uint16_t      ethernet:1;
+#endif
+		}__attribute__((__packed__));
+	 } __attribute__((__packed__));
+	 union {
+		uint16_t              l3r;	/**< Layer 3 result */
+		struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			uint16_t      first_ipv4:1;
+			uint16_t      first_ipv6:1;
+			uint16_t      gre:1;
+			uint16_t      min_enc:1;
+			uint16_t      last_ipv4:1;
+			uint16_t      last_ipv6:1;
+			uint16_t      first_info_err:1;/*0 info, 1 error*/
+			uint16_t      first_ip_err_code:5;
+			uint16_t      last_info_err:1;	/*0 info, 1 error*/
+			uint16_t      last_ip_err_code:3;
+#else
+			uint16_t      last_ip_err_code:3;
+			uint16_t      last_info_err:1;	/*0 info, 1 error*/
+			uint16_t      first_ip_err_code:5;
+			uint16_t      first_info_err:1;/*0 info, 1 error*/
+			uint16_t      last_ipv6:1;
+			uint16_t      last_ipv4:1;
+			uint16_t      min_enc:1;
+			uint16_t      gre:1;
+			uint16_t      first_ipv6:1;
+			uint16_t      first_ipv4:1;
+#endif
+#define first_ip_option        first_ip_err_code & 0x01
+#define first_unknown_ip_proto first_ip_err_code & 0x02
+#define first_fragmented       first_ip_err_code & 0x04
+#define first_ip_type          first_ip_err_code & 0x18
+
+		}__attribute__((__packed__));
+	 } __attribute__((__packed__));
+	 union {
+		uint8_t               l4r;	/**< Layer 4 result */
+		struct{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			uint8_t	       l4_type:3;
+			uint8_t	       l4_info_err:1;
+			uint8_t	       l4_result:4; /*if type IPSec: 1 ESP, 2 AH*/
+#else
+			uint8_t        l4_result:4; /*if type IPSec: 1 ESP, 2 AH*/
+			uint8_t        l4_info_err:1;
+			uint8_t        l4_type:3;
+#endif
+		} __attribute__((__packed__));
+	 } __attribute__((__packed__));
+	 uint8_t     cplan;		 /**< Classification plan id */
+	 uint16_t    nxthdr;		 /**< Next Header  */
+	 uint16_t    cksum;		 /**< Checksum */
+	 uint32_t    lcv;		 /**< LCV */
+	 uint8_t     shim_off[3];	 /**< Shim offset */
+	 uint8_t     eth_off;		 /**< ETH offset */
+	 uint8_t     llc_snap_off;	 /**< LLC_SNAP offset */
+	 uint8_t     vlan_off[2];	 /**< VLAN offset */
+	 uint8_t     etype_off;		 /**< ETYPE offset */
+	 uint8_t     pppoe_off;		 /**< PPP offset */
+	 uint8_t     mpls_off[2];	 /**< MPLS offset */
+	 uint8_t     ip_off[2];		 /**< IP offset */
+	 uint8_t     gre_off;		 /**< GRE offset */
+	 uint8_t     l4_off;		 /**< Layer 4 offset */
+	 uint8_t     nxthdr_off;	 /**< Parser end point */
+} __attribute__ ((__packed__));
+
+/* The structure is the Prepended Data to the Frame which is used by FMAN */
+struct annotations_t {
+	uint8_t reserved[DEFAULT_RX_ICEOF];
+	struct dpaa_eth_parse_results_t parse;	/**< Pointer to Parsed result*/
+	uint64_t reserved1;
+	uint64_t hash;			/**< Hash Result */
+};
+
+#define GET_ANNOTATIONS(_buf) \
+	(struct annotations_t *)(_buf)
+
+#define GET_RX_PRS(_buf) \
+	(struct dpaa_eth_parse_results_t *)((uint8_t *)_buf + DEFAULT_RX_ICEOF)
+
 uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
 
 uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);