[dpdk-dev] [PATCH 11/14] examples/ipsec-secgw: add app processing code

Ananyev, Konstantin konstantin.ananyev at intel.com
Tue Dec 24 14:13:06 CET 2019


> --- a/examples/ipsec-secgw/ipsec_worker.c
> +++ b/examples/ipsec-secgw/ipsec_worker.c
> @@ -15,6 +15,7 @@
>  #include <ctype.h>
>  #include <stdbool.h>
> 
> +#include <rte_acl.h>
>  #include <rte_common.h>
>  #include <rte_log.h>
>  #include <rte_memcpy.h>
> @@ -29,12 +30,51 @@
>  #include <rte_eventdev.h>
>  #include <rte_malloc.h>
>  #include <rte_mbuf.h>
> +#include <rte_lpm.h>
> +#include <rte_lpm6.h>
> 
>  #include "ipsec.h"
> +#include "ipsec_worker.h"
>  #include "event_helper.h"
> 
>  extern volatile bool force_quit;
> 
> +static inline enum pkt_type
> +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
> +{
> +	struct rte_ether_hdr *eth;
> +
> +	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> +	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> +		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> +				offsetof(struct ip, ip_p));
> +		if (**nlp == IPPROTO_ESP)
> +			return PKT_TYPE_IPSEC_IPV4;
> +		else
> +			return PKT_TYPE_PLAIN_IPV4;
> +	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
> +		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> +				offsetof(struct ip6_hdr, ip6_nxt));
> +		if (**nlp == IPPROTO_ESP)
> +			return PKT_TYPE_IPSEC_IPV6;
> +		else
> +			return PKT_TYPE_PLAIN_IPV6;
> +	}
> +
> +	/* Unknown/Unsupported type */
> +	return PKT_TYPE_INVALID;
> +}

Looking through that file, it seems like you chose to create your own set of
helper functions instead of trying to reuse the existing ones:

process_ipsec_get_pkt_type() vs prepare_one_packet()
update_mac_addrs() vs prepare_tx_pkt()
check_sp() vs inbound_sp_sa()

Obviously there is nothing good in duplicating code (and, possibly, bugs).
Any reason why you can't reuse the existing functions and need to reinvent your own?
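
For instance (just a rough sketch, not tested; sp_check_one() is a made-up
name, and the BYPASS/DISCARD/SPI2IDX semantics are taken from your check_sp()
above), the single-packet SP lookup could live as one shared inline in a
common header and be called from both the poll-mode path and your event
worker:

/* Rough sketch only - assumes <rte_acl.h> and the app's ipsec.h
 * (for DEFAULT_MAX_CATEGORIES, BYPASS, DISCARD, SPI2IDX,
 * IPSEC_SA_MAX_ENTRIES) are already included.
 * Returns 1 and fills *sa_idx on match (*sa_idx == 0 means bypass),
 * returns 0 on no match, discard or invalid SA index.
 */
static inline int
sp_check_one(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (sp == NULL)
		return 0;

	/* classify one packet against the SP ACL context */
	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (res == 0 || res == DISCARD)
		return 0;

	if (res == BYPASS) {
		*sa_idx = 0;
		return 1;
	}

	*sa_idx = SPI2IDX(res);
	return *sa_idx < IPSEC_SA_MAX_ENTRIES;
}

Then both inbound_sp_sa() and your new check_sp() could become thin wrappers
(or go away entirely), and a fix in one place would cover both data paths.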
 

> +
> +static inline void
> +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
> +{
> +	struct rte_ether_hdr *ethhdr;
> +
> +	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> +	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
> +	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
> +}
> +
>  static inline void
>  ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
>  {
> @@ -45,6 +85,177 @@ ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
>  	rte_event_eth_tx_adapter_txq_set(m, 0);
>  }
> 
> +static inline int
> +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
> +{
> +	uint32_t res;
> +
> +	if (unlikely(sp == NULL))
> +		return 0;
> +
> +	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
> +			DEFAULT_MAX_CATEGORIES);
> +
> +	if (unlikely(res == 0)) {
> +		/* No match */
> +		return 0;
> +	}
> +
> +	if (res == DISCARD)
> +		return 0;
> +	else if (res == BYPASS) {
> +		*sa_idx = 0;
> +		return 1;
> +	}
> +
> +	*sa_idx = SPI2IDX(res);
> +	if (*sa_idx < IPSEC_SA_MAX_ENTRIES)
> +		return 1;
> +
> +	/* Invalid SA IDX */
> +	return 0;
> +}
> +
> +static inline uint16_t
> +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
> +{
> +	uint32_t dst_ip;
> +	uint16_t offset;
> +	uint32_t hop;
> +	int ret;
> +
> +	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
> +	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
> +	dst_ip = rte_be_to_cpu_32(dst_ip);
> +
> +	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
> +
> +	if (ret == 0) {
> +		/* We have a hit */
> +		return hop;
> +	}
> +
> +	/* else */
> +	return RTE_MAX_ETHPORTS;
> +}
> +
> +/* TODO: To be tested */
> +static inline uint16_t
> +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
> +{
> +	uint8_t dst_ip[16];
> +	uint8_t *ip6_dst;
> +	uint16_t offset;
> +	uint32_t hop;
> +	int ret;
> +
> +	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
> +	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
> +	memcpy(&dst_ip[0], ip6_dst, 16);
> +
> +	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
> +
> +	if (ret == 0) {
> +		/* We have a hit */
> +		return hop;
> +	}
> +
> +	/* else */
> +	return RTE_MAX_ETHPORTS;
> +}
> +
> +static inline uint16_t
> +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
> +{
> +	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
> +		return route4_pkt(pkt, rt->rt4_ctx);
> +	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
> +		return route6_pkt(pkt, rt->rt6_ctx);
> +
> +	return RTE_MAX_ETHPORTS;
> +}
> +
> +static inline int
> +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
> +		struct rte_event *ev)
> +{
> +	struct ipsec_sa *sa = NULL;
> +	struct rte_mbuf *pkt;
> +	uint16_t port_id = 0;
> +	enum pkt_type type;
> +	uint32_t sa_idx;
> +	uint8_t *nlp;
> +
> +	/* Get pkt from event */
> +	pkt = ev->mbuf;
> +
> +	/* Check the packet type */
> +	type = process_ipsec_get_pkt_type(pkt, &nlp);
> +
> +	switch (type) {
> +	case PKT_TYPE_PLAIN_IPV4:
> +		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD)
> +			sa = (struct ipsec_sa *) pkt->udata64;
> +
> +		/* Check if we have a match */
> +		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> +			/* No valid match */
> +			goto drop_pkt_and_exit;
> +		}
> +		break;
> +
> +	case PKT_TYPE_PLAIN_IPV6:
> +		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD)
> +			sa = (struct ipsec_sa *) pkt->udata64;
> +
> +		/* Check if we have a match */
> +		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> +			/* No valid match */
> +			goto drop_pkt_and_exit;
> +		}
> +		break;
> +
> +	default:
> +		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
> +		goto drop_pkt_and_exit;
> +	}
> +
> +	/* Check if the packet has to be bypassed */
> +	if (sa_idx == 0)
> +		goto route_and_send_pkt;
> +
> +	/* Else the packet has to be protected with SA */
> +
> +	/* If the packet was IPsec processed, then SA pointer should be set */
> +	if (sa == NULL)
> +		goto drop_pkt_and_exit;
> +
> +	/* SPI on the packet should match with the one in SA */
> +	if (unlikely(sa->spi != sa_idx))
> +		goto drop_pkt_and_exit;
> +
> +route_and_send_pkt:
> +	port_id = get_route(pkt, rt, type);
> +	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> +		/* no match */
> +		goto drop_pkt_and_exit;
> +	}
> +	/* else, we have a matching route */
> +
> +	/* Update mac addresses */
> +	update_mac_addrs(pkt, port_id);
> +
> +	/* Update the event with the dest port */
> +	ipsec_event_pre_forward(pkt, port_id);
> +	return 1;
> +
> +drop_pkt_and_exit:
> +	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
> +	rte_pktmbuf_free(pkt);
> +	ev->mbuf = NULL;
> +	return 0;
> +}
> +
>  /*
>   * Event mode exposes various operating modes depending on the
>   * capabilities of the event device and the operating mode
> @@ -134,11 +345,11 @@ static void
>  ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links,
>  		uint8_t nb_links)
>  {
> +	struct lcore_conf_ev_tx_int_port_wrkr lconf;
>  	unsigned int nb_rx = 0;
> -	unsigned int port_id;
> -	struct rte_mbuf *pkt;
>  	struct rte_event ev;
>  	uint32_t lcore_id;
> +	int32_t socket_id;
> 
>  	/* Check if we have links registered for this lcore */
>  	if (nb_links == 0) {
> @@ -151,6 +362,21 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links,
>  	/* Get core ID */
>  	lcore_id = rte_lcore_id();
> 
> +	/* Get socket ID */
> +	socket_id = rte_lcore_to_socket_id(lcore_id);
> +
> +	/* Save routing table */
> +	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
> +	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
> +	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
> +	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
> +	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
> +	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
> +	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
> +	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
> +	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
> +	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
> +
>  	RTE_LOG(INFO, IPSEC,
>  		"Launching event mode worker (non-burst - Tx internal port - "
>  		"app mode - inbound) on lcore %d\n", lcore_id);
> @@ -175,13 +401,11 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links,
>  		if (nb_rx == 0)
>  			continue;
> 
> -		port_id = ev.queue_id;
> -		pkt = ev.mbuf;
> -
> -		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
> -
> -		/* Process packet */
> -		ipsec_event_pre_forward(pkt, port_id);
> +		if (process_ipsec_ev_inbound(&lconf.inbound,
> +				&lconf.rt, &ev) != 1) {
> +			/* The pkt has been dropped */
> +			continue;
> +		}
> 
>  		/*
>  		 * Since tx internal port is available, events can be
> diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
> new file mode 100644
> index 0000000..fd18a2e
> --- /dev/null
> +++ b/examples/ipsec-secgw/ipsec_worker.h
> @@ -0,0 +1,39 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Cavium, Inc
> + */
> +#ifndef _IPSEC_WORKER_H_
> +#define _IPSEC_WORKER_H_
> +
> +#include "ipsec.h"
> +
> +enum pkt_type {
> +	PKT_TYPE_PLAIN_IPV4 = 1,
> +	PKT_TYPE_IPSEC_IPV4,
> +	PKT_TYPE_PLAIN_IPV6,
> +	PKT_TYPE_IPSEC_IPV6,
> +	PKT_TYPE_INVALID
> +};
> +
> +struct route_table {
> +	struct rt_ctx *rt4_ctx;
> +	struct rt_ctx *rt6_ctx;
> +};
> +
> +/*
> + * Conf required by event mode worker with tx internal port
> + */
> +struct lcore_conf_ev_tx_int_port_wrkr {
> +	struct ipsec_ctx inbound;
> +	struct ipsec_ctx outbound;
> +	struct route_table rt;
> +} __rte_cache_aligned;
> +
> +/* TODO
> + *
> + * Move this function to ipsec_worker.c
> + */
> +void ipsec_poll_mode_worker(void);
> +
> +int ipsec_launch_one_lcore(void *args);
> +
> +#endif /* _IPSEC_WORKER_H_ */
> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
> index 7f046e3..9e17ba0 100644
> --- a/examples/ipsec-secgw/sa.c
> +++ b/examples/ipsec-secgw/sa.c
> @@ -772,17 +772,6 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
>  	printf("\n");
>  }
> 
> -struct sa_ctx {
> -	void *satbl; /* pointer to array of rte_ipsec_sa objects*/
> -	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
> -	union {
> -		struct {
> -			struct rte_crypto_sym_xform a;
> -			struct rte_crypto_sym_xform b;
> -		};
> -	} xf[IPSEC_SA_MAX_ENTRIES];
> -};
> -
>  static struct sa_ctx *
>  sa_create(const char *name, int32_t socket_id)
>  {
> --
> 2.7.4


