[dpdk-dev] [RFC PATCH 4/4] example/ipsec-secgw: add support for offloading crypto op

Akhil Goyal akhil.goyal at nxp.com
Tue Aug 15 08:35:05 CEST 2017


The ipsec-secgw application is modified so that it can support the
following types of actions for crypto operations:
1. full protocol offload using crypto devices
2. inline ipsec using ethernet devices to perform crypto operations
3. full protocol offload using ethernet devices
4. non protocol offload

Action type 1 is implemented as part of this patch.
Action types 2 and 3 will be added as part of the original RFC in this
thread. Action type 4 is already supported.
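
As an illustration of the new "type" and "port_id" tokens parsed in
sa.c, an outbound SA requesting full protocol offload on port 1 could
be written as below. This is only a sketch: the surrounding rule
syntax is the application's existing SA configuration format, and the
keys and addresses are placeholders.

  sa out 5 cipher_algo aes-128-cbc \
  cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \
  auth_algo sha1-hmac \
  auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \
  mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 \
  type crypto-proto-offload port_id 1

If "type" or "port_id" is omitted, the rule falls back to
RTE_SECURITY_SESS_NONE, i.e. the existing non-offload path.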

Signed-off-by: Akhil Goyal <akhil.goyal at nxp.com>
---
 examples/ipsec-secgw/ipsec.c | 125 +++++++++++++++++++++++++------------
 examples/ipsec-secgw/ipsec.h |  13 +++-
 examples/ipsec-secgw/sa.c    | 142 ++++++++++++++++++++++++++++++++-----------
 3 files changed, 206 insertions(+), 74 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 0afb9d6..c8fde1c 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -37,6 +37,7 @@
 #include <rte_branch_prediction.h>
 #include <rte_log.h>
 #include <rte_crypto.h>
+#include <rte_security.h>
 #include <rte_cryptodev.h>
 #include <rte_mbuf.h>
 #include <rte_hash.h>
@@ -71,22 +72,40 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 			ipsec_ctx->tbl[cdev_id_qp].id,
 			ipsec_ctx->tbl[cdev_id_qp].qp);
 
-	sa->crypto_session = rte_cryptodev_sym_session_create(
-			ipsec_ctx->session_pool);
-	rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
-			sa->crypto_session, sa->xforms,
-			ipsec_ctx->session_pool);
-
-	rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
-	if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
-		ret = rte_cryptodev_queue_pair_attach_sym_session(
-				ipsec_ctx->tbl[cdev_id_qp].id,
-				ipsec_ctx->tbl[cdev_id_qp].qp,
-				sa->crypto_session);
+	if (sa->type == RTE_SECURITY_SESS_NONE) {
+		sa->crypto_session = rte_cryptodev_sym_session_create(
+				ipsec_ctx->session_pool);
+		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+				sa->crypto_session, sa->xforms,
+				ipsec_ctx->session_pool);
+
+		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
+		if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
+			ret = rte_cryptodev_queue_pair_attach_sym_session(
+					ipsec_ctx->tbl[cdev_id_qp].id,
+					ipsec_ctx->tbl[cdev_id_qp].qp,
+					sa->crypto_session);
+			if (ret < 0) {
+				RTE_LOG(ERR, IPSEC,
+					"Session cannot be attached to qp %u ",
+					ipsec_ctx->tbl[cdev_id_qp].qp);
+				return -1;
+			}
+		}
+	} else {
+		struct rte_security_sess_conf sess_conf;
+
+		sa->sec_session = rte_security_session_create(
+				ipsec_ctx->session_pool);
+		sess_conf.action_type = sa->type;
+		sess_conf.protocol = RTE_SEC_CONF_IPSEC;
+		sess_conf.ipsec_xform = sa->sec_xform;
+
+		ret = rte_security_session_init(sa->portid, sa->sec_session,
+					&sess_conf, ipsec_ctx->session_pool);
 		if (ret < 0) {
-			RTE_LOG(ERR, IPSEC,
-				"Session cannot be attached to qp %u ",
-				ipsec_ctx->tbl[cdev_id_qp].qp);
+			RTE_LOG(ERR, IPSEC, "SEC Session init failed: err: %d",
+					ret);
 			return -1;
 		}
 	}
@@ -125,6 +144,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 {
 	int32_t ret = 0, i;
 	struct ipsec_mbuf_metadata *priv;
+	struct rte_crypto_sym_op *sym_cop;
 	struct ipsec_sa *sa;
 
 	for (i = 0; i < nb_pkts; i++) {
@@ -140,24 +160,50 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		sa = sas[i];
 		priv->sa = sa;
 
-		priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
-		priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
-		rte_prefetch0(&priv->sym_cop);
-
-		if ((unlikely(sa->crypto_session == NULL)) &&
-				create_session(ipsec_ctx, sa)) {
-			rte_pktmbuf_free(pkts[i]);
-			continue;
-		}
-
-		rte_crypto_op_attach_sym_session(&priv->cop,
-				sa->crypto_session);
-
-		ret = xform_func(pkts[i], sa, &priv->cop);
-		if (unlikely(ret)) {
-			rte_pktmbuf_free(pkts[i]);
-			continue;
+		switch (sa->type) {
+		case RTE_SECURITY_SESS_CRYPTO_PROTO_OFFLOAD:
+			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+			rte_prefetch0(&priv->sym_cop);
+
+			if ((unlikely(sa->sec_session == NULL)) &&
+					create_session(ipsec_ctx, sa)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+
+			sym_cop = get_sym_cop(&priv->cop);
+			sym_cop->m_src = pkts[i];
+
+			rte_security_attach_session(&priv->cop,
+					sa->sec_session);
+			break;
+		case RTE_SECURITY_SESS_NONE:
+
+			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+			rte_prefetch0(&priv->sym_cop);
+
+			if ((unlikely(sa->crypto_session == NULL)) &&
+					create_session(ipsec_ctx, sa)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+
+			rte_crypto_op_attach_sym_session(&priv->cop,
+					sa->crypto_session);
+
+			ret = xform_func(pkts[i], sa, &priv->cop);
+			if (unlikely(ret)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+			break;
+		case RTE_SECURITY_SESS_ETH_PROTO_OFFLOAD:
+		case RTE_SECURITY_SESS_ETH_INLINE_CRYPTO:
+			break;
 		}
 
 		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
@@ -199,11 +245,14 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 
 			RTE_ASSERT(sa != NULL);
 
-			ret = xform_func(pkt, sa, cops[j]);
-			if (unlikely(ret))
-				rte_pktmbuf_free(pkt);
-			else
-				pkts[nb_pkts++] = pkt;
+			if (sa->type == RTE_SECURITY_SESS_NONE) {
+				ret = xform_func(pkt, sa, cops[j]);
+				if (unlikely(ret)) {
+					rte_pktmbuf_free(pkt);
+					continue;
+				}
+			}
+			pkts[nb_pkts++] = pkt;
 		}
 	}
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index da1fb1b..6291d86 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -38,6 +38,7 @@
 
 #include <rte_byteorder.h>
 #include <rte_crypto.h>
+#include <rte_security.h>
 
 #define RTE_LOGTYPE_IPSEC       RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2
@@ -99,7 +100,10 @@ struct ipsec_sa {
 	uint32_t cdev_id_qp;
 	uint64_t seq;
 	uint32_t salt;
-	struct rte_cryptodev_sym_session *crypto_session;
+	union {
+		struct rte_cryptodev_sym_session *crypto_session;
+		struct rte_security_session *sec_session;
+	};
 	enum rte_crypto_cipher_algorithm cipher_algo;
 	enum rte_crypto_auth_algorithm auth_algo;
 	enum rte_crypto_aead_algorithm aead_algo;
@@ -117,7 +121,12 @@ struct ipsec_sa {
 	uint8_t auth_key[MAX_KEY_SIZE];
 	uint16_t auth_key_len;
 	uint16_t aad_len;
-	struct rte_crypto_sym_xform *xforms;
+	union {
+		struct rte_crypto_sym_xform *xforms;
+		struct rte_security_ipsec_xform *sec_xform;
+	};
+	enum rte_security_session_action_type type;
+	uint16_t portid;
 } __rte_cache_aligned;
 
 struct ipsec_mbuf_metadata {
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 7be0e62..851262b 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -41,6 +41,7 @@
 
 #include <rte_memzone.h>
 #include <rte_crypto.h>
+#include <rte_security.h>
 #include <rte_cryptodev.h>
 #include <rte_byteorder.h>
 #include <rte_errno.h>
@@ -51,6 +52,8 @@
 #include "esp.h"
 #include "parser.h"
 
+#define IPDEFTTL 64
+
 struct supported_cipher_algo {
 	const char *keyword;
 	enum rte_crypto_cipher_algorithm algo;
@@ -238,6 +241,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 	uint32_t src_p = 0;
 	uint32_t dst_p = 0;
 	uint32_t mode_p = 0;
+	uint32_t type_p = 0;
+	uint32_t portid_p = 0;
 
 	if (strcmp(tokens[0], "in") == 0) {
 		ri = &nb_sa_in;
@@ -550,6 +555,47 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			continue;
 		}
 
+		if (strcmp(tokens[ti], "type") == 0) {
+			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
+			if (status->status < 0)
+				return;
+
+			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+			if (status->status < 0)
+				return;
+
+			if (strcmp(tokens[ti], "eth-inline-crypto") == 0)
+				rule->type = RTE_SECURITY_SESS_ETH_INLINE_CRYPTO;
+			else if (strcmp(tokens[ti], "eth-proto-offload") == 0)
+				rule->type = RTE_SECURITY_SESS_ETH_PROTO_OFFLOAD;
+			else if (strcmp(tokens[ti], "crypto-proto-offload") == 0)
+				rule->type = RTE_SECURITY_SESS_CRYPTO_PROTO_OFFLOAD;
+			else if (strcmp(tokens[ti], "non-proto") == 0)
+				rule->type = RTE_SECURITY_SESS_NONE;
+			else {
+				APP_CHECK(0, status, "unrecognized "
+					"input \"%s\"", tokens[ti]);
+				return;
+			}
+
+			type_p = 1;
+			continue;
+		}
+
+		if (strcmp(tokens[ti], "port_id") == 0) {
+			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
+			if (status->status < 0)
+				return;
+			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+			if (status->status < 0)
+				return;
+			rule->portid = atoi(tokens[ti]);
+			if (status->status < 0)
+				return;
+			portid_p = 1;
+			continue;
+		}
+
 		/* unrecognizeable input */
 		APP_CHECK(0, status, "unrecognized input \"%s\"",
 			tokens[ti]);
@@ -580,6 +626,14 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 	if (status->status < 0)
 		return;
 
+	if ((rule->type != RTE_SECURITY_SESS_NONE) && (portid_p == 0))
+	printf("Missing portid option, falling back to non-offload\n");
+
+	if (!type_p || !portid_p) {
+		rule->type = RTE_SECURITY_SESS_NONE;
+		rule->portid = -1;
+	}
+
 	*ri = *ri + 1;
 }
 
@@ -647,9 +701,12 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
 
 struct sa_ctx {
 	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
-	struct {
-		struct rte_crypto_sym_xform a;
-		struct rte_crypto_sym_xform b;
+	union {
+		struct {
+			struct rte_crypto_sym_xform a;
+			struct rte_crypto_sym_xform b;
+		};
+		struct rte_security_ipsec_xform c;
 	} xf[IPSEC_SA_MAX_ENTRIES];
 };
 
@@ -706,40 +763,57 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
 		}
 
+		if (sa->type == RTE_SECURITY_SESS_CRYPTO_PROTO_OFFLOAD) {
+			sa_ctx->xf[idx].c.cipher_alg = sa->cipher_algo;
+			sa_ctx->xf[idx].c.auth_alg = sa->auth_algo;
+			sa_ctx->xf[idx].c.cipher_key.data = sa->cipher_key;
+			sa_ctx->xf[idx].c.auth_key.data = sa->auth_key;
+			sa_ctx->xf[idx].c.cipher_key.length =
+						sa->cipher_key_len;
+			sa_ctx->xf[idx].c.auth_key.length = sa->auth_key_len;
+			sa_ctx->xf[idx].c.op = (inbound == 1)?
+						RTE_SECURITY_IPSEC_OP_DECAP :
+						RTE_SECURITY_IPSEC_OP_ENCAP;
+			sa_ctx->xf[idx].c.salt = sa->salt;
+			sa_ctx->xf[idx].c.spi = sa->spi;
+			if (sa->flags == IP4_TUNNEL) {
+				sa_ctx->xf[idx].c.mode =
+					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
+				sa_ctx->xf[idx].c.tunnel.ipv4.ttl = IPDEFTTL;
+				memcpy((uint8_t *)&sa_ctx->xf[idx].c.tunnel.ipv4.src_ip,
+						(uint8_t *)&sa->src.ip.ip4, 4);
+				memcpy((uint8_t *)&sa_ctx->xf[idx].c.tunnel.ipv4.dst_ip,
+						(uint8_t *)&sa->dst.ip.ip4, 4);
+//				sa_ctx->xf[idx].c.tunnel.ipv4.src_ip =
+//					(struct in_addr)sa->src.ip.ip4;
+//				sa_ctx->xf[idx].c.tunnel.ipv4.dst_ip =
+//					(struct in_addr)sa->dst.ip.ip4;
+			}
+			/* TODO support for Transport and IPV6 tunnel */
+			sa->sec_xform = &sa_ctx->xf[idx].c;
+
+			print_one_sa_rule(sa, inbound);
+			continue;
+		}
+
 		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
 			iv_length = 16;
 
-			if (inbound) {
-				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
-				sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
-				sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
-				sa_ctx->xf[idx].a.aead.key.length =
-					sa->cipher_key_len;
-				sa_ctx->xf[idx].a.aead.op =
-					RTE_CRYPTO_AEAD_OP_DECRYPT;
-				sa_ctx->xf[idx].a.next = NULL;
-				sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
-				sa_ctx->xf[idx].a.aead.iv.length = iv_length;
-				sa_ctx->xf[idx].a.aead.aad_length =
-					sa->aad_len;
-				sa_ctx->xf[idx].a.aead.digest_length =
-					sa->digest_len;
-			} else { /* outbound */
-				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
-				sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
-				sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
-				sa_ctx->xf[idx].a.aead.key.length =
-					sa->cipher_key_len;
-				sa_ctx->xf[idx].a.aead.op =
-					RTE_CRYPTO_AEAD_OP_ENCRYPT;
-				sa_ctx->xf[idx].a.next = NULL;
-				sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
-				sa_ctx->xf[idx].a.aead.iv.length = iv_length;
-				sa_ctx->xf[idx].a.aead.aad_length =
-					sa->aad_len;
-				sa_ctx->xf[idx].a.aead.digest_length =
-					sa->digest_len;
-			}
+			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
+			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
+			sa_ctx->xf[idx].a.aead.key.length =
+				sa->cipher_key_len;
+			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
+				RTE_CRYPTO_AEAD_OP_DECRYPT :
+				RTE_CRYPTO_AEAD_OP_ENCRYPT;
+			sa_ctx->xf[idx].a.next = NULL;
+			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
+			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
+			sa_ctx->xf[idx].a.aead.aad_length =
+				sa->aad_len;
+			sa_ctx->xf[idx].a.aead.digest_length =
+				sa->digest_len;
 
 			sa->xforms = &sa_ctx->xf[idx].a;
 
-- 
2.9.3


