[dpdk-dev,11/22] examples/ipsec-secgw: move IV to crypto op private data

Message ID 20170621074731.45013-11-pablo.de.lara.guarch@intel.com (mailing list archive)
State Not Applicable, archived
Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail apply patch file failure

Commit Message

De Lara Guarch, Pablo June 21, 2017, 7:47 a.m. UTC
  Usually, the IV changes for each crypto operation.
Therefore, instead of pointing at the same location,
the IV is now copied into the private data area that
follows each crypto operation.

This allows the IV to be passed as an offset from
the beginning of the crypto operation, instead of
as a pointer.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
 examples/ipsec-secgw/esp.c   | 27 ++++++++++++++++++---------
 examples/ipsec-secgw/ipsec.h |  2 +-
 2 files changed, 19 insertions(+), 10 deletions(-)
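
For context, a minimal sketch (not taken from the patch) of how an IV placed in the
crypto op private data is addressed. It assumes the layout used by this example
application, where struct rte_crypto_sym_op immediately follows struct rte_crypto_op
and the private data area comes right after it, and it uses the
rte_crypto_op_ctod_offset()/rte_crypto_op_ctophys_offset() helpers from rte_crypto.h
that the patch relies on; set_cipher_iv() is a hypothetical helper name.

#include <stdint.h>
#include <rte_crypto.h>
#include <rte_memcpy.h>

#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			 sizeof(struct rte_crypto_sym_op))

/* Hypothetical helper: copy the per-packet IV into the op private data
 * and point the cipher transform at it (cryptodev API as used by this
 * series, where the cipher IV is still described per sym op). */
static inline void
set_cipher_iv(struct rte_crypto_op *cop, struct rte_crypto_sym_op *sym_cop,
		const uint8_t *iv, uint16_t iv_len)
{
	/* Virtual address of the private data area holding the IV */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET);

	rte_memcpy(iv_ptr, iv, iv_len);
	sym_cop->cipher.iv.data = iv_ptr;
	/* Physical address computed from the same fixed offset */
	sym_cop->cipher.iv.phys_addr =
			rte_crypto_op_ctophys_offset(cop, IV_OFFSET);
	sym_cop->cipher.iv.length = iv_len;
}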
  

Patch

diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index e77afa0..1504386 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -50,6 +50,9 @@ 
 #include "esp.h"
 #include "ipip.h"
 
+#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
+				sizeof(struct rte_crypto_sym_op))
+
 int
 esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		struct rte_crypto_op *cop)
@@ -93,13 +96,17 @@  esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	struct cnt_blk *icb;
 	uint8_t *aad;
 	uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
 +	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
+				uint8_t *, IV_OFFSET);
 
 	switch (sa->cipher_algo) {
 	case RTE_CRYPTO_CIPHER_NULL:
 	case RTE_CRYPTO_CIPHER_AES_CBC:
-		sym_cop->cipher.iv.data = iv;
-		sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
-				 ip_hdr_len + sizeof(struct esp_hdr));
 +		/* Copy IV at the end of the crypto operation */
 +		rte_memcpy(iv_ptr, iv, sa->iv_len);
 +		sym_cop->cipher.iv.data = iv_ptr;
+		sym_cop->cipher.iv.phys_addr =
+				rte_crypto_op_ctophys_offset(cop, IV_OFFSET);
 		sym_cop->cipher.iv.length = sa->iv_len;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CTR:
@@ -108,9 +115,9 @@  esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		icb->salt = sa->salt;
 		memcpy(&icb->iv, iv, 8);
 		icb->cnt = rte_cpu_to_be_32(1);
-		sym_cop->cipher.iv.data = (uint8_t *)icb;
-		sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
-			 (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
 +		sym_cop->cipher.iv.data = iv_ptr;
+		sym_cop->cipher.iv.phys_addr =
+				rte_crypto_op_ctophys_offset(cop, IV_OFFSET);
 		sym_cop->cipher.iv.length = 16;
 		break;
 	default:
@@ -341,13 +348,15 @@  esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	padding[pad_len - 2] = pad_len - 2;
 	padding[pad_len - 1] = nlp;
 
 +	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
+				uint8_t *, IV_OFFSET);
 	struct cnt_blk *icb = get_cnt_blk(m);
 	icb->salt = sa->salt;
 	icb->iv = sa->seq;
 	icb->cnt = rte_cpu_to_be_32(1);
-	sym_cop->cipher.iv.data = (uint8_t *)icb;
-	sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
-			 (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
 +	sym_cop->cipher.iv.data = iv_ptr;
+	sym_cop->cipher.iv.phys_addr =
+			rte_crypto_op_ctophys_offset(cop, IV_OFFSET);
 	sym_cop->cipher.iv.length = 16;
 
 	uint8_t *aad;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index fe42661..de1df7b 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -118,10 +118,10 @@  struct ipsec_sa {
 } __rte_cache_aligned;
 
 struct ipsec_mbuf_metadata {
-	uint8_t buf[32];
 	struct ipsec_sa *sa;
 	struct rte_crypto_op cop;
 	struct rte_crypto_sym_op sym_cop;
+	uint8_t buf[32];
 } __rte_cache_aligned;
 
 struct cdev_qp {
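
Side note on the ipsec.h change (illustration only, not part of the patch): the
private data addressed by IV_OFFSET is simply the memory that follows cop and
sym_cop inside struct ipsec_mbuf_metadata, which is why buf[] is moved to the end
of the structure. A minimal sanity-check sketch, assuming the patched field order,
no padding between cop, sym_cop and buf, and RTE_ASSERT() from rte_debug.h:

#include <stdint.h>
#include <rte_crypto.h>
#include <rte_debug.h>
#include "ipsec.h"	/* struct ipsec_mbuf_metadata, as patched above */

#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			 sizeof(struct rte_crypto_sym_op))

/* An IV written IV_OFFSET bytes past &priv->cop must land inside priv->buf;
 * 16 bytes covers the largest IV/counter block used by this application. */
static inline void
check_iv_in_private_data(struct ipsec_mbuf_metadata *priv)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(&priv->cop,
				uint8_t *, IV_OFFSET);

	RTE_ASSERT(iv_ptr >= priv->buf);
	RTE_ASSERT(iv_ptr + 16 <= priv->buf + sizeof(priv->buf));
}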