[dpdk-dev] [PATCH 08/13] app/crypto-perf: move IV to crypto op private data

Pablo de Lara pablo.de.lara.guarch at intel.com
Sun May 28 23:05:17 CEST 2017


Usually, the IV changes for each crypto operation.
Therefore, instead of having all operations point at
the same location, the IV is now copied into each
crypto operation's private data.

This lets the IV be passed as an offset from the
beginning of the crypto operation, instead of as
a pointer.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch at intel.com>
---
 app/test-crypto-perf/cperf_ops.c                 | 45 +++++++++++++++++-------
 app/test-crypto-perf/cperf_ops.h                 |  3 +-
 app/test-crypto-perf/cperf_test_latency.c        |  9 +++--
 app/test-crypto-perf/cperf_test_throughput.c     |  9 +++--
 app/test-crypto-perf/cperf_test_vector_parsing.c |  1 -
 app/test-crypto-perf/cperf_test_vectors.c        |  1 -
 app/test-crypto-perf/cperf_test_verify.c         |  8 +++--
 7 files changed, 55 insertions(+), 21 deletions(-)
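
For reviewers, a minimal sketch (not part of the patch) of how the per-op
IV is laid out and filled after this change. The helper name set_op_iv()
is illustrative only; rte_crypto_op_ctod_offset(), rte_crypto_op_ctophys_offset()
and the offset arithmetic mirror the hunks that follow:

#include <string.h>
#include <rte_crypto.h>

/*
 * Illustrative helper (not in the patch): copy a test vector IV into the
 * crypto op private data and point cipher.iv at that copy.
 *
 * Op layout after this change:
 *   struct rte_crypto_op | struct rte_crypto_sym_op | [per-test priv] | IV
 */
static void
set_op_iv(struct rte_crypto_op *op, uint16_t iv_offset,
		const uint8_t *iv, uint16_t iv_len)
{
	struct rte_crypto_sym_op *sym_op = op->sym;

	/* Virtual and physical address of the IV inside the op itself */
	sym_op->cipher.iv.data = rte_crypto_op_ctod_offset(op,
			uint8_t *, iv_offset);
	sym_op->cipher.iv.phys_addr = rte_crypto_op_ctophys_offset(op,
			iv_offset);
	sym_op->cipher.iv.length = iv_len;

	/* Each op now carries its own copy of the IV */
	memcpy(sym_op->cipher.iv.data, iv, iv_len);
}

The throughput and verify tests reserve priv_size = iv.length when creating
the op pool and use iv_offset = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op); the latency test additionally accounts for
its struct priv_op_data (the stored result pointer) in both the pool's
private size and the offset.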

diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index a101ba1..a1f2c69 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -40,7 +40,8 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector __rte_unused)
+		const struct cperf_test_vector *test_vector __rte_unused,
+		uint16_t iv_offset __rte_unused)
 {
 	uint16_t i;
 
@@ -65,7 +66,8 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector __rte_unused)
+		const struct cperf_test_vector *test_vector __rte_unused,
+		uint16_t iv_offset __rte_unused)
 {
 	uint16_t i;
 
@@ -90,7 +92,8 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector)
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset)
 {
 	uint16_t i;
 
@@ -103,9 +106,14 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* cipher parameters */
-		sym_op->cipher.iv.data = test_vector->iv.data;
-		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+		sym_op->cipher.iv.data = rte_crypto_op_ctod_offset(ops[i],
+							uint8_t *, iv_offset);
+		sym_op->cipher.iv.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
+							iv_offset);
 		sym_op->cipher.iv.length = test_vector->iv.length;
+		memcpy(sym_op->cipher.iv.data,
+				test_vector->iv.data,
+				test_vector->iv.length);
 
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
@@ -125,7 +133,8 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector)
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset __rte_unused)
 {
 	uint16_t i;
 
@@ -188,7 +197,8 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector)
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset)
 {
 	uint16_t i;
 
@@ -201,9 +211,14 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* cipher parameters */
-		sym_op->cipher.iv.data = test_vector->iv.data;
-		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+		sym_op->cipher.iv.data = rte_crypto_op_ctod_offset(ops[i],
+							uint8_t *, iv_offset);
+		sym_op->cipher.iv.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
+							iv_offset);
 		sym_op->cipher.iv.length = test_vector->iv.length;
+		memcpy(sym_op->cipher.iv.data,
+				test_vector->iv.data,
+				test_vector->iv.length);
 
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
@@ -264,7 +279,8 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector)
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset)
 {
 	uint16_t i;
 
@@ -277,9 +293,14 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* cipher parameters */
-		sym_op->cipher.iv.data = test_vector->iv.data;
-		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+		sym_op->cipher.iv.data = rte_crypto_op_ctod_offset(ops[i],
+							uint8_t *, iv_offset);
+		sym_op->cipher.iv.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
+							iv_offset);
 		sym_op->cipher.iv.length = test_vector->iv.length;
+		memcpy(sym_op->cipher.iv.data,
+				test_vector->iv.data,
+				test_vector->iv.length);
 
 		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset =
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
index 1b748da..f7b431c 100644
--- a/app/test-crypto-perf/cperf_ops.h
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -48,7 +48,8 @@ typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector);
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset);
 
 struct cperf_op_fns {
 	cperf_sessions_create_t sess_create;
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 215614e..780eef0 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -68,6 +68,7 @@ struct cperf_latency_ctx {
 
 struct priv_op_data {
 	struct cperf_op_result *result;
+	uint8_t IV[0];
 };
 
 #define max(a, b) (a > b ? (uint64_t)a : (uint64_t)b)
@@ -280,7 +281,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
 	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
 			dev_id);
 
-	uint16_t priv_size = sizeof(struct priv_op_data);
+	uint16_t priv_size = sizeof(struct priv_op_data) + test_vector->iv.length;
 	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
 			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, priv_size,
 			rte_socket_id());
@@ -344,6 +345,10 @@ cperf_latency_test_runner(void *arg)
 	else
 		test_burst_size = ctx->options->burst_size_list[0];
 
+	uint32_t iv_offset = sizeof(struct rte_crypto_op) +
+		sizeof(struct rte_crypto_sym_op) +
+		sizeof(struct cperf_op_result *);
+
 	while (test_burst_size <= ctx->options->max_burst_size) {
 		uint64_t ops_enqd = 0, ops_deqd = 0;
 		uint64_t m_idx = 0, b_idx = 0;
@@ -372,7 +377,7 @@ cperf_latency_test_runner(void *arg)
 			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
 					&ctx->mbufs_out[m_idx],
 					burst_size, ctx->sess, ctx->options,
-					ctx->test_vector);
+					ctx->test_vector, iv_offset);
 
 			tsc_start = rte_rdtsc_precise();
 
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 61b27ea..144b550 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -262,8 +262,10 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
 	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
 			dev_id);
 
+	uint16_t priv_size = test_vector->iv.length;
+
 	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
-			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, priv_size,
 			rte_socket_id());
 	if (ctx->crypto_op_pool == NULL)
 		goto err;
@@ -315,6 +317,9 @@ cperf_throughput_test_runner(void *test_ctx)
 	else
 		test_burst_size = ctx->options->burst_size_list[0];
 
+	uint32_t iv_offset = sizeof(struct rte_crypto_op) +
+		sizeof(struct rte_crypto_sym_op);
+
 	while (test_burst_size <= ctx->options->max_burst_size) {
 		uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
 		uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
@@ -346,7 +351,7 @@ cperf_throughput_test_runner(void *test_ctx)
 			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
 					&ctx->mbufs_out[m_idx],
 					ops_needed, ctx->sess, ctx->options,
-					ctx->test_vector);
+					ctx->test_vector, iv_offset);
 
 			/**
 			 * When ops_needed is smaller than ops_enqd, the
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index f384e3d..62d0c91 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -303,7 +303,6 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
 	} else if (strstr(key_token, "iv")) {
 		rte_free(vector->iv.data);
 		vector->iv.data = data;
-		vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
 		if (tc_found)
 			vector->iv.length = data_length;
 		else {
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 757957f..36b3f6f 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -423,7 +423,6 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 			memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
 		}
 		t_vec->ciphertext.length = options->max_buffer_size;
-		t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
 		t_vec->iv.length = options->cipher_iv_sz;
 		t_vec->data.cipher_offset = 0;
 		t_vec->data.cipher_length = options->max_buffer_size;
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 454221e..a599d91 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -266,8 +266,9 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
 	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
 			dev_id);
 
+	uint16_t priv_size = test_vector->iv.length;
 	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
-			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, priv_size,
 			rte_socket_id());
 	if (ctx->crypto_op_pool == NULL)
 		goto err;
@@ -417,6 +418,9 @@ cperf_verify_test_runner(void *test_ctx)
 		printf("\n# Running verify test on device: %u, lcore: %u\n",
 			ctx->dev_id, lcore);
 
+	uint32_t iv_offset = sizeof(struct rte_crypto_op) +
+		sizeof(struct rte_crypto_sym_op);
+
 	while (ops_enqd_total < ctx->options->total_ops) {
 
 		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
@@ -438,7 +442,7 @@ cperf_verify_test_runner(void *test_ctx)
 		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
 				&ctx->mbufs_out[m_idx],
 				ops_needed, ctx->sess, ctx->options,
-				ctx->test_vector);
+				ctx->test_vector, iv_offset);
 
 #ifdef CPERF_LINEARIZATION_ENABLE
 		if (linearize) {
-- 
2.7.4


