pipeline: add symmetric crypto to table action

Message ID 20180828081929.34085-1-roy.fan.zhang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Cristian Dumitrescu
Headers
Series pipeline: add symmetric crypto to table action |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation success Compilation OK

Commit Message

Fan Zhang Aug. 28, 2018, 8:19 a.m. UTC
  This patch adds symmetric crypto action support to the pipeline
library. The symmetric crypto action works as a shim layer
between the pipeline and DPDK cryptodev: it handles control path
requests such as session creation/deletion, and performs the data
path work of assembling crypto operations for received packets.

Change-Id: Id0b547bb10f9e8814b08f5df2343337daca0ae92
Signed-off-by: Zhang, Roy Fan <roy.fan.zhang@intel.com>
---
 lib/librte_pipeline/Makefile           |   2 +-
 lib/librte_pipeline/meson.build        |   2 +-
 lib/librte_pipeline/rte_table_action.c | 466 +++++++++++++++++++++++++++++++++
 lib/librte_pipeline/rte_table_action.h |  70 +++++
 4 files changed, 538 insertions(+), 2 deletions(-)
  

Patch

diff --git a/lib/librte_pipeline/Makefile b/lib/librte_pipeline/Makefile
index 84afe98cb..cf265503f 100644
--- a/lib/librte_pipeline/Makefile
+++ b/lib/librte_pipeline/Makefile
@@ -12,7 +12,7 @@  CFLAGS += -DALLOW_EXPERIMENTAL_API
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_table
-LDLIBS += -lrte_port -lrte_meter -lrte_sched
+LDLIBS += -lrte_port -lrte_meter -lrte_sched -lrte_cryptodev
 
 EXPORT_MAP := rte_pipeline_version.map
 
diff --git a/lib/librte_pipeline/meson.build b/lib/librte_pipeline/meson.build
index dc16ab42f..04e5f5179 100644
--- a/lib/librte_pipeline/meson.build
+++ b/lib/librte_pipeline/meson.build
@@ -5,4 +5,4 @@  version = 3
 allow_experimental_apis = true
 sources = files('rte_pipeline.c', 'rte_port_in_action.c', 'rte_table_action.c')
 headers = files('rte_pipeline.h', 'rte_port_in_action.h', 'rte_table_action.h')
-deps += ['port', 'table', 'meter', 'sched']
+deps += ['port', 'table', 'meter', 'sched', 'cryptodev']
diff --git a/lib/librte_pipeline/rte_table_action.c b/lib/librte_pipeline/rte_table_action.c
index 83ffa5ded..a958aa82a 100644
--- a/lib/librte_pipeline/rte_table_action.c
+++ b/lib/librte_pipeline/rte_table_action.c
@@ -15,6 +15,7 @@ 
 #include <rte_esp.h>
 #include <rte_tcp.h>
 #include <rte_udp.h>
+#include <rte_cryptodev.h>
 
 #include "rte_table_action.h"
 
@@ -1219,6 +1220,428 @@  pkt_work_time(struct time_data *data,
 	data->time = time;
 }
 
+
+/**
+ * RTE_TABLE_ACTION_CRYPTO
+ */
+
+#define CRYPTO_OP_MASK_CIPHER	0x1
+#define CRYPTO_OP_MASK_AUTH	0x2
+#define CRYPTO_OP_MASK_AEAD	0x4
+
+/* Offset of the per-op IV/AAD storage from the start of the crypto op
+ * metadata (see struct crypto_op_sym_iv_aad).  Parenthesized so the
+ * macro stays a single term inside arithmetic expressions.
+ */
+#define CRYPTO_IV_OFFSET						\
+	(sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
+
+/** In-mbuf-metadata layout of a crypto operation: the generic op and
+ *  symmetric op headers are immediately followed by per-op IV/AAD
+ *  storage, so CRYPTO_IV_OFFSET points at the iv_aad union below.
+ */
+struct crypto_op_sym_iv_aad {
+	struct rte_crypto_op op;
+	struct rte_crypto_sym_op sym_op;
+	union {
+		struct {
+			/** Cipher IV, located at CRYPTO_IV_OFFSET. */
+			uint8_t cipher_iv[
+				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+			/** Auth IV, at CRYPTO_IV_OFFSET + IV_SIZE_MAX. */
+			uint8_t auth_iv[
+				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+		} cipher_auth;
+
+		struct {
+			/** AEAD IV, located at CRYPTO_IV_OFFSET. */
+			uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+			/** AEAD AAD, at CRYPTO_IV_OFFSET + IV_SIZE_MAX. */
+			uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
+		} aead_iv_aad;
+
+	} iv_aad;
+};
+
+/** Per-rule data for the symmetric crypto action. */
+struct sym_crypto_data {
+
+	union {
+		struct {
+
+			/** Length of cipher iv. */
+			uint16_t cipher_iv_len;
+
+			/** Offset from start of IP header to the cipher iv. */
+			uint16_t cipher_iv_data_offset;
+
+			/** Length of cipher iv to be updated in the mbuf. */
+			uint16_t cipher_iv_update_len;
+
+			/** Offset from start of IP header to the auth iv. */
+			uint16_t auth_iv_data_offset;
+
+			/** Length of auth iv in the mbuf. */
+			uint16_t auth_iv_len;
+
+			/** Length of auth iv to be updated in the mbuf. */
+			uint16_t auth_iv_update_len;
+
+		} cipher_auth;
+		struct {
+
+			/** Length of iv. */
+			uint16_t iv_len;
+
+			/** Offset from start of IP header to the aead iv. */
+			uint16_t iv_data_offset;
+
+			/** Length of iv to be updated in the mbuf. */
+			uint16_t iv_update_len;
+
+			/** Length of aad */
+			uint16_t aad_len;
+
+			/** Offset from start of IP header to the aad. */
+			uint16_t aad_data_offset;
+
+			/** Length of aad to be updated in the mbuf. */
+			uint16_t aad_update_len;
+
+		} aead;
+	};
+
+	/** Offset from start of IP header to the data. */
+	uint16_t data_offset;
+
+	/** Digest length. */
+	uint16_t digest_len;
+
+	/** Block size of the cipher/aead algorithm. */
+	uint16_t block_size;
+
+	/** Mask of crypto operations (CRYPTO_OP_MASK_*). */
+	uint16_t op_mask;
+
+	/** Direction of crypto, encrypt or decrypt. */
+	uint16_t direction;
+
+	/** Session pointer. */
+	struct rte_cryptodev_sym_session *session;
+
+	/** Per-rule cipher iv / aad storage.  This flexible array member
+	 *  must be the LAST field of the struct: in the original layout
+	 *  "direction" was declared after it, so every copy into
+	 *  iv_aad_data silently clobbered that field.
+	 */
+	uint8_t iv_aad_data[0];
+
+} __attribute__((__packed__));
+
+/* Validate the per-profile symmetric crypto configuration: both session
+ * mempools must be provided and the target cryptodev must exist.
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+static int
+sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
+{
+	int valid;
+
+	valid = (cfg->mempool_session_create != NULL) &&
+		(cfg->mempool_session_init != NULL) &&
+		(cfg->cryptodev_id < rte_cryptodev_count());
+
+	return valid ? 0 : -EINVAL;
+}
+
+/* Look up the block size of the cipher/aead algorithm in @xform from the
+ * capability array of device @cdev_id.  Returns the block size, or -1 if
+ * the device does not advertise the algorithm.
+ */
+static int
+get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
+{
+	struct rte_cryptodev_info dev_info;
+	const struct rte_cryptodev_capabilities *cap;
+	unsigned int i;
+
+	rte_cryptodev_info_get(cdev_id, &dev_info);
+
+	/* The capability array is terminated by an entry whose op is
+	 * RTE_CRYPTO_OP_TYPE_UNDEFINED.  Note the original loop used
+	 * "continue" on a non-matching xform type without advancing the
+	 * index, spinning forever on the first non-matching entry.
+	 */
+	for (i = 0; dev_info.capabilities[i].op !=
+			RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
+		cap = &dev_info.capabilities[i];
+
+		if (cap->sym.xform_type != xform->type)
+			continue;
+
+		if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+				(cap->sym.cipher.algo == xform->cipher.algo))
+			return cap->sym.cipher.block_size;
+
+		if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
+				(cap->sym.aead.algo == xform->aead.algo))
+			return cap->sym.aead.block_size;
+	}
+
+	/* Algorithm not supported by this device. */
+	return -1;
+}
+
+/* Apply a symmetric crypto rule: validate the xform chain, record the
+ * per-rule IV/AAD offsets and lengths into @data, and create/initialize
+ * the cryptodev session.  Returns 0 on success, negative errno on error.
+ */
+static int
+sym_crypto_apply(struct sym_crypto_data *data,
+	struct rte_table_action_sym_crypto_config *cfg,
+	struct rte_table_action_sym_crypto_params *p)
+{
+	struct rte_cryptodev_sym_session *sess;
+	struct rte_crypto_sym_xform *xform = p->xform;
+	struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	struct rte_crypto_auth_xform *auth_xform = NULL;
+	struct rte_crypto_aead_xform *aead_xform = NULL;
+	int ret;
+
+	memset(data, 0, sizeof(*data));
+
+	/* Walk the xform chain, validating IV/AAD sizes against the metadata
+	 * storage limits and recording the data-path parameters.
+	 */
+	while (xform) {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+			cipher_xform = &xform->cipher;
+
+			if (cipher_xform->iv.length >
+				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+				return -ENOMEM;
+
+			ret = get_block_size(xform, cfg->cryptodev_id);
+			if (ret < 0)
+				return -1;
+			data->block_size = (uint16_t)ret;
+			data->op_mask |= CRYPTO_OP_MASK_CIPHER;
+
+			data->cipher_auth.cipher_iv_data_offset = (uint16_t)
+					p->cipher_auth.cipher_iv.offset;
+			data->cipher_auth.cipher_iv_len =
+					cipher_xform->iv.length;
+			data->cipher_auth.cipher_iv_update_len = (uint16_t)
+					p->cipher_auth.cipher_iv.length;
+
+			/* Store the rule's default cipher IV. */
+			rte_memcpy(data->iv_aad_data,
+					p->cipher_auth.cipher_iv.val,
+					p->cipher_auth.cipher_iv.length);
+
+			data->direction = cipher_xform->op;
+
+			/* IV lives right after op + sym_op in metadata. */
+			cipher_xform->iv.offset = CRYPTO_IV_OFFSET;
+
+		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+			auth_xform = &xform->auth;
+			if (auth_xform->iv.length >
+				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+				return -ENOMEM;
+			data->op_mask |= CRYPTO_OP_MASK_AUTH;
+
+			data->cipher_auth.auth_iv_data_offset =
+					(uint16_t)p->cipher_auth.auth_iv.offset;
+			data->cipher_auth.auth_iv_len = auth_xform->iv.length;
+			data->cipher_auth.auth_iv_update_len = (uint16_t)
+					p->cipher_auth.auth_iv.length;
+			data->digest_len = auth_xform->digest_length;
+
+			/* Auth IV follows the cipher IV slot in metadata. */
+			if (auth_xform->iv.length)
+				auth_xform->iv.offset = CRYPTO_IV_OFFSET +
+				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX;
+
+			data->direction = (auth_xform->op ==
+					RTE_CRYPTO_AUTH_OP_GENERATE) ?
+					RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+					RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+			aead_xform = &xform->aead;
+
+			if ((aead_xform->iv.length >
+				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
+				aead_xform->aad_length >
+				RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
+				return -EINVAL;
+
+			ret = get_block_size(xform, cfg->cryptodev_id);
+			if (ret < 0)
+				return -1;
+			data->block_size = (uint16_t)ret;
+			data->op_mask |= CRYPTO_OP_MASK_AEAD;
+			data->digest_len = aead_xform->digest_length;
+
+			data->aead.iv_data_offset = (uint16_t)p->aead.iv.offset;
+			data->aead.iv_len = aead_xform->iv.length;
+			data->aead.iv_update_len = (uint16_t)p->aead.iv.length;
+
+			data->aead.aad_data_offset = (uint16_t)
+					p->aead.aad.offset;
+			data->aead.aad_len = aead_xform->aad_length;
+			data->aead.aad_update_len =
+					(uint16_t)p->aead.aad.length;
+
+			if (aead_xform->iv.length)
+				aead_xform->iv.offset = CRYPTO_IV_OFFSET;
+
+			/* Store rule defaults: IV first, then AAD. */
+			rte_memcpy(data->iv_aad_data,
+					p->aead.iv.val,
+					p->aead.iv.length);
+
+			rte_memcpy(data->iv_aad_data + p->aead.iv.length,
+					p->aead.aad.val,
+					p->aead.aad.length);
+
+			data->direction = (aead_xform->op ==
+					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+					RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+					RTE_CRYPTO_CIPHER_OP_DECRYPT;
+		} else
+			return -EINVAL;
+
+		xform = xform->next;
+	}
+
+	data->data_offset = (uint16_t)p->data_offset;
+
+	sess = rte_cryptodev_sym_session_create(cfg->mempool_session_create);
+	if (!sess) {
+		memset(data, 0, sizeof(*data));
+		return -ENOMEM;
+	}
+
+	/* Must pass p->xform here: the local "xform" cursor is NULL after
+	 * the walk above, so passing it would initialize the session with
+	 * no transforms at all.
+	 */
+	ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, sess, p->xform,
+			cfg->mempool_session_init);
+	if (ret < 0) {
+		/* Do not leak the session on init failure. */
+		rte_cryptodev_sym_session_free(sess);
+		memset(data, 0, sizeof(*data));
+		return ret;
+	}
+
+	data->session = sess;
+
+	return 0;
+}
+
+/* Data-path work for the symmetric crypto action: build the crypto op in
+ * the mbuf metadata area at cfg->op_offset, pad the packet for ciphering
+ * (encrypt direction only), and fill in cipher/auth/aead fields from the
+ * per-rule @data.  Returns 0 on success, 1 (drop) if padding fails.
+ */
+static __rte_always_inline uint64_t
+pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
+		struct rte_table_action_sym_crypto_config *cfg,
+		uint16_t ip_offset)
+{
+	struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
+			RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
+	struct rte_crypto_op *op = &crypto_op->op;
+	uint16_t rel_ip_offset = ip_offset - mbuf->data_off;
+	uint16_t payload_len = 0;
+
+	op->sym->m_src = mbuf;
+	op->sym->m_dst = NULL;
+	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+	op->phys_addr = rte_pktmbuf_iova_offset(mbuf, cfg->op_offset);
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	op->sym->session = data->session;
+
+	/* Pad the packet to a cipher-block multiple and reserve room for
+	 * the digest.  The pad must be computed against the region being
+	 * ciphered (starting at data_offset); the original subtracted the
+	 * full packet length, under-padding by the offset and underflowing
+	 * whenever the offset exceeded the pad.
+	 */
+	if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		uint16_t crypto_len = rte_pktmbuf_pkt_len(mbuf) -
+				(data->data_offset + rel_ip_offset);
+
+		payload_len = RTE_ALIGN_CEIL(crypto_len, data->block_size) -
+				crypto_len;
+
+		if (unlikely(rte_pktmbuf_append(mbuf, payload_len +
+				data->digest_len) == NULL))
+			return 1;
+	}
+
+	/* NOTE(review): this length spans the entire packet, including the
+	 * bytes before data_offset and the digest just appended; confirm
+	 * whether cipher/auth/aead data.length should exclude those regions.
+	 */
+	payload_len = rte_pktmbuf_pkt_len(mbuf);
+
+	if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
+		/* Prepare cipher op. */
+		uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
+
+		op->sym->cipher.data.length = payload_len;
+		op->sym->cipher.data.offset = data->data_offset + rel_ip_offset;
+
+		if (data->cipher_auth.cipher_iv_update_len) {
+			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+				data->cipher_auth.cipher_iv_data_offset
+				+ ip_offset);
+
+			/* For encryption, update the pkt iv field, otherwise
+			 * update the iv_aad_data field.
+			 */
+			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+				rte_memcpy(pkt_iv, data->iv_aad_data,
+					data->cipher_auth.cipher_iv_update_len);
+			else
+				rte_memcpy(data->iv_aad_data, pkt_iv,
+					data->cipher_auth.cipher_iv_update_len);
+		}
+
+		/* Write iv into the op metadata. */
+		rte_memcpy(iv, data->iv_aad_data,
+				data->cipher_auth.cipher_iv_len);
+	}
+
+	if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
+		/* Prepare auth op: digest sits at the end of the packet. */
+		op->sym->auth.data.offset = rel_ip_offset;
+		op->sym->auth.data.length = payload_len;
+		op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+				uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+				data->digest_len);
+		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+				rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+
+		if (data->cipher_auth.auth_iv_update_len) {
+			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+					data->cipher_auth.auth_iv_data_offset
+					+ ip_offset);
+			uint8_t *data_iv = data->iv_aad_data +
+					data->cipher_auth.cipher_iv_len;
+
+			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+				rte_memcpy(pkt_iv, data_iv,
+					data->cipher_auth.auth_iv_update_len);
+			else
+				rte_memcpy(data_iv, pkt_iv,
+					data->cipher_auth.auth_iv_update_len);
+		}
+
+		if (data->cipher_auth.auth_iv_len) {
+			/* Auth iv is stored after the cipher iv. */
+			uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
+
+			rte_memcpy(iv, data->iv_aad_data +
+					data->cipher_auth.cipher_iv_len,
+					data->cipher_auth.auth_iv_len);
+		}
+	}
+
+	if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
+		uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
+		uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
+
+		op->sym->aead.aad.data = aad;
+		op->sym->aead.aad.phys_addr = op->phys_addr +
+				CRYPTO_IV_OFFSET +
+				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX;
+		op->sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+				uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+				data->digest_len);
+		op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+				rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+		op->sym->aead.data.offset = data->data_offset + rel_ip_offset;
+		op->sym->aead.data.length = payload_len;
+
+		if (data->aead.iv_update_len) {
+			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+					data->aead.iv_data_offset + ip_offset);
+			uint8_t *data_iv = data->iv_aad_data;
+
+			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+				rte_memcpy(pkt_iv, data_iv,
+						data->aead.iv_update_len);
+			else
+				rte_memcpy(data_iv, pkt_iv,
+					data->aead.iv_update_len);
+		}
+
+		rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
+
+		/* AAD update: the original copy-pasted iv_update_len here,
+		 * copying the wrong number of bytes; use aad_update_len.
+		 */
+		if (data->aead.aad_update_len) {
+			uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+					data->aead.aad_data_offset + ip_offset);
+			uint8_t *data_aad = data->iv_aad_data +
+					data->aead.iv_len;
+
+			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+				rte_memcpy(pkt_aad, data_aad,
+						data->aead.aad_update_len);
+			else
+				rte_memcpy(data_aad, pkt_aad,
+					data->aead.aad_update_len);
+		}
+
+		rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
+					data->aead.aad_len);
+	}
+
+	return 0;
+}
+
 /**
  * Action profile
  */
@@ -1235,6 +1658,7 @@  action_valid(enum rte_table_action_type action)
 	case RTE_TABLE_ACTION_TTL:
 	case RTE_TABLE_ACTION_STATS:
 	case RTE_TABLE_ACTION_TIME:
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
 		return 1;
 	default:
 		return 0;
@@ -1254,6 +1678,7 @@  struct ap_config {
 	struct rte_table_action_nat_config nat;
 	struct rte_table_action_ttl_config ttl;
 	struct rte_table_action_stats_config stats;
+	struct rte_table_action_sym_crypto_config crypto;
 };
 
 static size_t
@@ -1274,6 +1699,8 @@  action_cfg_size(enum rte_table_action_type action)
 		return sizeof(struct rte_table_action_ttl_config);
 	case RTE_TABLE_ACTION_STATS:
 		return sizeof(struct rte_table_action_stats_config);
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
+		return sizeof(struct rte_table_action_sym_crypto_config);
 	default:
 		return 0;
 	}
@@ -1305,6 +1732,8 @@  action_cfg_get(struct ap_config *ap_config,
 	case RTE_TABLE_ACTION_STATS:
 		return &ap_config->stats;
 
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
+		return &ap_config->crypto;
 	default:
 		return NULL;
 	}
@@ -1460,6 +1889,10 @@  rte_table_action_profile_action_register(struct rte_table_action_profile *profil
 		status = stats_cfg_check(action_config);
 		break;
 
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
+		status = sym_crypto_cfg_check(action_config);
+		break;
+
 	default:
 		status = 0;
 		break;
@@ -1609,6 +2042,11 @@  rte_table_action_apply(struct rte_table_action *action,
 		return time_apply(action_data,
 			action_params);
 
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
+		return sym_crypto_apply(action_data,
+				&action->cfg.crypto,
+				action_params);
+
 	default:
 		return -EINVAL;
 	}
@@ -1966,6 +2404,14 @@  pkt_work(struct rte_mbuf *mbuf,
 		pkt_work_time(data, time);
 	}
 
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+		void *data = action_data_get(table_entry, action,
+				RTE_TABLE_ACTION_SYM_CRYPTO);
+
+		drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->crypto,
+				ip_offset);
+	}
+
 	return drop_mask;
 }
 
@@ -2254,6 +2700,26 @@  pkt4_work(struct rte_mbuf **mbufs,
 		pkt_work_time(data3, time);
 	}
 
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+		void *data0 = action_data_get(table_entry0, action,
+				RTE_TABLE_ACTION_SYM_CRYPTO);
+		void *data1 = action_data_get(table_entry1, action,
+				RTE_TABLE_ACTION_SYM_CRYPTO);
+		void *data2 = action_data_get(table_entry2, action,
+				RTE_TABLE_ACTION_SYM_CRYPTO);
+		void *data3 = action_data_get(table_entry3, action,
+				RTE_TABLE_ACTION_SYM_CRYPTO);
+
+		drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->crypto,
+				ip_offset);
+		drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->crypto,
+				ip_offset);
+		drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->crypto,
+				ip_offset);
+		drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->crypto,
+				ip_offset);
+	}
+
 	return drop_mask0 |
 		(drop_mask1 << 1) |
 		(drop_mask2 << 2) |
diff --git a/lib/librte_pipeline/rte_table_action.h b/lib/librte_pipeline/rte_table_action.h
index c7f751aae..897f42308 100644
--- a/lib/librte_pipeline/rte_table_action.h
+++ b/lib/librte_pipeline/rte_table_action.h
@@ -93,6 +93,9 @@  enum rte_table_action_type {
 
 	/** Timestamp. */
 	RTE_TABLE_ACTION_TIME,
+
+	/** Crypto. */
+	RTE_TABLE_ACTION_SYM_CRYPTO,
 };
 
 /** Common action configuration (per table action profile). */
@@ -898,6 +901,73 @@  rte_table_action_time_read(struct rte_table_action *action,
 	void *data,
 	uint64_t *timestamp);
 
+
+/**
+ * RTE_TABLE_ACTION_CRYPTO
+ */
+/** Symmetric crypto action configuration (per table action profile). */
+struct rte_table_action_sym_crypto_config {
+	/** Target Cryptodev ID. */
+	uint8_t cryptodev_id;
+
+	/** Mempool used to allocate cryptodev sessions
+	 *  (rte_cryptodev_sym_session_create).
+	 */
+	struct rte_mempool *mempool_session_create;
+
+	/** Mempool holding device private session data, used when
+	 *  initializing cryptodev sessions
+	 *  (rte_cryptodev_sym_session_init).
+	 */
+	struct rte_mempool *mempool_session_init;
+
+	/** Byte offset within mbuf metadata where the per-packet
+	 *  rte_crypto_op is built by the action.
+	 */
+	uint32_t op_offset;
+};
+
+#ifndef RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
+#define RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX		(16)
+#endif
+
+#ifndef RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX
+#define RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX	(16)
+#endif
+
+/** Common action structure to store the data's value, length, and offset */
+struct rte_table_action_vlo {
+	/** Pointer to the default data bytes for the rule. */
+	uint8_t *val;
+	/** Number of bytes at val / to be updated per packet. */
+	uint32_t length;
+	/** Offset of this data within the packet (see field user). */
+	uint32_t offset;
+};
+
+/** Symmetric Crypto action parameters (per table rule). */
+struct rte_table_action_sym_crypto_params {
+
+	/** Xform pointer contains all relevant information */
+	struct rte_crypto_sym_xform *xform;
+
+	/**
+	 * Offset from start of IP packet to the first packet byte to be
+	 * processed by the crypto unit.
+	 **/
+	uint32_t data_offset;
+
+	/** Per-algorithm-class IV/AAD parameters; the active member is
+	 *  selected by the xform chain (cipher/auth vs aead).
+	 */
+	union {
+		struct {
+			/** Cipher iv data, offset from start of ip header */
+			struct rte_table_action_vlo cipher_iv;
+
+			/** Auth iv data, offset from start of ip header */
+			struct rte_table_action_vlo auth_iv;
+
+		} cipher_auth;
+
+		struct {
+			/** AEAD AAD data, offset from start of ip header */
+			struct rte_table_action_vlo aad;
+
+			/** AEAD iv data, offset from start of ip header */
+			struct rte_table_action_vlo iv;
+
+		} aead;
+	};
+};
+
 #ifdef __cplusplus
 }
 #endif