[v4] vhost: check header for legacy dequeue offload

Message ID 20210615063507.18198-1-xiao.w.wang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series [v4] vhost: check header for legacy dequeue offload

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-abi-testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/iol-testing success Testing PASS
ci/github-robot success github build: passed
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Functional fail Functional Testing issues

Commit Message

Xiao Wang June 15, 2021, 6:35 a.m. UTC
  When parsing the virtio-net header and the packet header for dequeue offload,
we need to perform sanity checks on the packet header to ensure:
  - No out-of-bounds memory access.
  - The packet header and virtio_net header are valid and aligned.

Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
Cc: stable@dpdk.org

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
v4:
- Rebase on head of main branch.
- Allow empty L4 payload in GSO.

v3:
- Check data_len before calling rte_pktmbuf_mtod. (David)

v2:
- Allow empty L4 payload for cksum offload. (Konstantin)
---
 lib/vhost/virtio_net.c | 52 +++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 45 insertions(+), 7 deletions(-)
  

Comments

David Marchand June 15, 2021, 7:57 a.m. UTC | #1
On Tue, Jun 15, 2021 at 9:06 AM Xiao Wang <xiao.w.wang@intel.com> wrote:
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8da8a86a10..351ff0a841 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -2259,44 +2259,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
>         return false;
>  }
>
> -static void
> -parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
> +static int
> +parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
> +               uint16_t *len)
>  {


This function name is misleading; it could be named parse_headers().
Its semantics get more and more confusing with those l4_hdr and len pointers.

This function fills ->lX_len in the mbuf, so everything is available to the caller.

The caller can check that rte_pktmbuf_data_len() is >= m->l2_len +
m->l3_len + somesize.
=> no need for len.

l4_hdr can simply be deduced with rte_pktmbuf_mtod_offset(m, struct
somestruct *, m->l2_len + m->l3_len).
=> no need for l4_hdr.
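
Roughly what I have in mind (untested sketch, rollback of the mbuf fields on
error is left aside here, see below):

#include <errno.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static int
parse_headers(struct rte_mbuf *m, uint16_t *l4_proto)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	uint16_t ethertype;

	if (rte_pktmbuf_data_len(m) < sizeof(struct rte_ether_hdr))
		return -EINVAL;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	m->l2_len = sizeof(struct rte_ether_hdr);
	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (ethertype == RTE_ETHER_TYPE_VLAN) {
		struct rte_vlan_hdr *vlan_hdr;

		/* compare against absolute sizes, no running counter */
		if (rte_pktmbuf_data_len(m) < sizeof(struct rte_ether_hdr) +
				sizeof(struct rte_vlan_hdr))
			return -EINVAL;
		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
		m->l2_len += sizeof(struct rte_vlan_hdr);
		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	}

	switch (ethertype) {
	case RTE_ETHER_TYPE_IPV4:
		if (rte_pktmbuf_data_len(m) < m->l2_len + sizeof(struct rte_ipv4_hdr))
			return -EINVAL;
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);
		*l4_proto = ipv4_hdr->next_proto_id;
		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
		if (rte_pktmbuf_data_len(m) < m->l2_len + m->l3_len)
			return -EINVAL;
		m->ol_flags |= PKT_TX_IPV4;
		break;
	case RTE_ETHER_TYPE_IPV6:
		if (rte_pktmbuf_data_len(m) < m->l2_len + sizeof(struct rte_ipv6_hdr))
			return -EINVAL;
		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, m->l2_len);
		*l4_proto = ipv6_hdr->proto;
		m->l3_len = sizeof(struct rte_ipv6_hdr);
		m->ol_flags |= PKT_TX_IPV6;
		break;
	default:
		m->l3_len = 0;
		*l4_proto = 0;
		break;
	}

	return 0;
}

The caller can then derive the L4 header itself with rte_pktmbuf_mtod_offset()
once it has checked the remaining length.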


>         struct rte_ipv4_hdr *ipv4_hdr;
>         struct rte_ipv6_hdr *ipv6_hdr;
>         void *l3_hdr = NULL;

No need for l3_hdr.


>         struct rte_ether_hdr *eth_hdr;
>         uint16_t ethertype;
> +       uint16_t data_len = m->data_len;

Avoid direct access to mbuf internals, we have inline helpers:
rte_pktmbuf_data_len(m).


> +
> +       if (data_len <= sizeof(struct rte_ether_hdr))

Strictly speaking, < is enough.


> +               return -EINVAL;
>
>         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
>
>         m->l2_len = sizeof(struct rte_ether_hdr);
>         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
> +       data_len -= sizeof(struct rte_ether_hdr);

No need to decrement data_len if the checks below are all done against absolute values.
See suggestions below.


>
>         if (ethertype == RTE_ETHER_TYPE_VLAN) {
> +               if (data_len <= sizeof(struct rte_vlan_hdr))
> +                       return -EINVAL;

if (data_len < sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr))


> +
>                 struct rte_vlan_hdr *vlan_hdr =
>                         (struct rte_vlan_hdr *)(eth_hdr + 1);
>
>                 m->l2_len += sizeof(struct rte_vlan_hdr);
>                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
> +               data_len -= sizeof(struct rte_vlan_hdr);

Idem.


>         }
>
>         l3_hdr = (char *)eth_hdr + m->l2_len;
>
>         switch (ethertype) {
>         case RTE_ETHER_TYPE_IPV4:
> +               if (data_len <= sizeof(struct rte_ipv4_hdr))
> +                       return -EINVAL;

if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))


>                 ipv4_hdr = l3_hdr;

ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);


>                 *l4_proto = ipv4_hdr->next_proto_id;
>                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
> +               if (data_len <= m->l3_len) {

if (data_len < m->l2_len + m->l3_len)


> +                       m->l3_len = 0;
> +                       return -EINVAL;

Returning here leaves m->l2_len set.


> +               }
>                 *l4_hdr = (char *)l3_hdr + m->l3_len;
>                 m->ol_flags |= PKT_TX_IPV4;
> +               data_len -= m->l3_len;
>                 break;
>         case RTE_ETHER_TYPE_IPV6:
> +               if (data_len <= sizeof(struct rte_ipv6_hdr))
> +                       return -EINVAL;

if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
Returning here leaves m->l2_len set.


>                 ipv6_hdr = l3_hdr;

ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, m->l2_len);


>                 *l4_proto = ipv6_hdr->proto;
>                 m->l3_len = sizeof(struct rte_ipv6_hdr);
>                 *l4_hdr = (char *)l3_hdr + m->l3_len;
>                 m->ol_flags |= PKT_TX_IPV6;
> +               data_len -= m->l3_len;
>                 break;
>         default:
>                 m->l3_len = 0;
> @@ -2304,6 +2324,9 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
>                 *l4_hdr = NULL;
>                 break;
>         }
> +
> +       *len = data_len;
> +       return 0;
>  }
>
>  static __rte_always_inline void
> @@ -2312,21 +2335,27 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
>         uint16_t l4_proto = 0;
>         void *l4_hdr = NULL;
>         struct rte_tcp_hdr *tcp_hdr = NULL;
> +       uint16_t len = 0, tcp_len;
> +
> +       if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
> +               return;
>
> -       parse_ethernet(m, &l4_proto, &l4_hdr);
>         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
>                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
>                         switch (hdr->csum_offset) {
>                         case (offsetof(struct rte_tcp_hdr, cksum)):
> -                               if (l4_proto == IPPROTO_TCP)
> +                               if (l4_proto == IPPROTO_TCP &&
> +                                       len >= sizeof(struct rte_tcp_hdr))

if (rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len + sizeof(struct
rte_tcp_hdr))
Then, if this check fails, we leave l2_len, l3_len and the PKT_TX_IPVx
flag set in the mbuf.

These two comments apply to other updates below.
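
Clearing them back on error would be something like (helper name is just for
illustration, assumes <rte_mbuf.h>):

static inline void
offload_error_rollback(struct rte_mbuf *m)
{
	/* undo what parsing already set in the mbuf */
	m->l2_len = 0;
	m->l3_len = 0;
	m->ol_flags &= ~(PKT_TX_IPV4 | PKT_TX_IPV6);
}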

>                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
>                                 break;
>                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
> -                               if (l4_proto == IPPROTO_UDP)
> +                               if (l4_proto == IPPROTO_UDP &&
> +                                       len >= sizeof(struct rte_udp_hdr))
>                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
>                                 break;
>                         case (offsetof(struct rte_sctp_hdr, cksum)):
> -                               if (l4_proto == IPPROTO_SCTP)
> +                               if (l4_proto == IPPROTO_SCTP &&
> +                                       len >= sizeof(struct rte_sctp_hdr))
>                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
>                                 break;
>                         default:
> @@ -2339,12 +2368,21 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
>                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
>                 case VIRTIO_NET_HDR_GSO_TCPV4:
>                 case VIRTIO_NET_HDR_GSO_TCPV6:
> +                       if (l4_proto != IPPROTO_TCP ||
> +                               len < sizeof(struct rte_tcp_hdr))
> +                               break;
>                         tcp_hdr = l4_hdr;

tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, m->l2_len +
m->l3_len);



> +                       tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
> +                       if (len < tcp_len)
> +                               break;
>                         m->ol_flags |= PKT_TX_TCP_SEG;
>                         m->tso_segsz = hdr->gso_size;
> -                       m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
> +                       m->l4_len = tcp_len;
>                         break;
>                 case VIRTIO_NET_HDR_GSO_UDP:
> +                       if (l4_proto != IPPROTO_UDP ||
> +                               len < sizeof(struct rte_udp_hdr))
> +                               break;
>                         m->ol_flags |= PKT_TX_UDP_SEG;
>                         m->tso_segsz = hdr->gso_size;
>                         m->l4_len = sizeof(struct rte_udp_hdr);
> --
> 2.15.1
>
  
Xiao Wang June 16, 2021, 2:33 p.m. UTC | #2
Hi David,

Thanks for your comments.
I agree with your suggestions. BTW, I noticed some other invalid corner cases which require rolling back mbuf->l2_len, l3_len and ol_flags.
E.g. the default case in the "switch {}" block is not valid.
Also, the l4_proto variable is better declared as uint8_t rather than uint16_t.

I will prepare a new version.
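
For the GSO/TCP branch, I am thinking of something along these lines (rough,
untested sketch with a made-up helper name, not necessarily the final code):

#include <netinet/in.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

static void
vhost_gso_tcp_fixup(struct rte_mbuf *m, uint8_t l4_proto, uint16_t gso_size)
{
	struct rte_tcp_hdr *tcp_hdr;
	uint8_t tcp_len;

	if (l4_proto != IPPROTO_TCP || rte_pktmbuf_data_len(m) <
			m->l2_len + m->l3_len + sizeof(struct rte_tcp_hdr))
		goto rollback;

	tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
			m->l2_len + m->l3_len);
	tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
	if (rte_pktmbuf_data_len(m) < m->l2_len + m->l3_len + tcp_len)
		goto rollback;

	m->ol_flags |= PKT_TX_TCP_SEG;
	m->tso_segsz = gso_size;
	m->l4_len = tcp_len;
	return;

rollback:
	/* don't leave half-initialized offload state in the mbuf */
	m->l2_len = 0;
	m->l3_len = 0;
	m->ol_flags &= ~(PKT_TX_IPV4 | PKT_TX_IPV6);
}

The UDP GSO case would get a similar check against sizeof(struct rte_udp_hdr).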

BRs,
Xiao


Patch

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8da8a86a10..351ff0a841 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2259,44 +2259,64 @@  virtio_net_with_host_offload(struct virtio_net *dev)
 	return false;
 }
 
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+static int
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
+		uint16_t *len)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
 	void *l3_hdr = NULL;
 	struct rte_ether_hdr *eth_hdr;
 	uint16_t ethertype;
+	uint16_t data_len = m->data_len;
+
+	if (data_len <= sizeof(struct rte_ether_hdr))
+		return -EINVAL;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
 	m->l2_len = sizeof(struct rte_ether_hdr);
 	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+	data_len -= sizeof(struct rte_ether_hdr);
 
 	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		if (data_len <= sizeof(struct rte_vlan_hdr))
+			return -EINVAL;
+
 		struct rte_vlan_hdr *vlan_hdr =
 			(struct rte_vlan_hdr *)(eth_hdr + 1);
 
 		m->l2_len += sizeof(struct rte_vlan_hdr);
 		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+		data_len -= sizeof(struct rte_vlan_hdr);
 	}
 
 	l3_hdr = (char *)eth_hdr + m->l2_len;
 
 	switch (ethertype) {
 	case RTE_ETHER_TYPE_IPV4:
+		if (data_len <= sizeof(struct rte_ipv4_hdr))
+			return -EINVAL;
 		ipv4_hdr = l3_hdr;
 		*l4_proto = ipv4_hdr->next_proto_id;
 		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+		if (data_len <= m->l3_len) {
+			m->l3_len = 0;
+			return -EINVAL;
+		}
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV4;
+		data_len -= m->l3_len;
 		break;
 	case RTE_ETHER_TYPE_IPV6:
+		if (data_len <= sizeof(struct rte_ipv6_hdr))
+			return -EINVAL;
 		ipv6_hdr = l3_hdr;
 		*l4_proto = ipv6_hdr->proto;
 		m->l3_len = sizeof(struct rte_ipv6_hdr);
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV6;
+		data_len -= m->l3_len;
 		break;
 	default:
 		m->l3_len = 0;
@@ -2304,6 +2324,9 @@  parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 		*l4_hdr = NULL;
 		break;
 	}
+
+	*len = data_len;
+	return 0;
 }
 
 static __rte_always_inline void
@@ -2312,21 +2335,27 @@  vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 	uint16_t l4_proto = 0;
 	void *l4_hdr = NULL;
 	struct rte_tcp_hdr *tcp_hdr = NULL;
+	uint16_t len = 0, tcp_len;
+
+	if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
+		return;
 
-	parse_ethernet(m, &l4_proto, &l4_hdr);
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
 			switch (hdr->csum_offset) {
 			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
+				if (l4_proto == IPPROTO_TCP &&
+					len >= sizeof(struct rte_tcp_hdr))
 					m->ol_flags |= PKT_TX_TCP_CKSUM;
 				break;
 			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
+				if (l4_proto == IPPROTO_UDP &&
+					len >= sizeof(struct rte_udp_hdr))
 					m->ol_flags |= PKT_TX_UDP_CKSUM;
 				break;
 			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
+				if (l4_proto == IPPROTO_SCTP &&
+					len >= sizeof(struct rte_sctp_hdr))
 					m->ol_flags |= PKT_TX_SCTP_CKSUM;
 				break;
 			default:
@@ -2339,12 +2368,21 @@  vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 		case VIRTIO_NET_HDR_GSO_TCPV6:
+			if (l4_proto != IPPROTO_TCP ||
+				len < sizeof(struct rte_tcp_hdr))
+				break;
 			tcp_hdr = l4_hdr;
+			tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			if (len < tcp_len)
+				break;
 			m->ol_flags |= PKT_TX_TCP_SEG;
 			m->tso_segsz = hdr->gso_size;
-			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			m->l4_len = tcp_len;
 			break;
 		case VIRTIO_NET_HDR_GSO_UDP:
+			if (l4_proto != IPPROTO_UDP ||
+				len < sizeof(struct rte_udp_hdr))
+				break;
 			m->ol_flags |= PKT_TX_UDP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = sizeof(struct rte_udp_hdr);