[dpdk-dev] [PATCH v2 3/4] crypto/qat: enable support of Kasumi F8 in QAT cryptodev

Deepak Kumar Jain deepak.k.jain at intel.com
Thu Aug 25 15:23:38 CEST 2016


From: Deepak Kumar JAIN <deepak.k.jain at intel.com>

This patch enables support of the KASUMI (F8) cipher algorithm
in the Intel QuickAssist Technology (QAT) crypto PMD.

Signed-off-by: Deepak Kumar Jain <deepak.k.jain at intel.com>
---
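Reviewer note (not part of the commit message or the patch): a minimal
sketch of the cipher transform an application would build to exercise the
KASUMI F8 path enabled here, assuming the session-based sym-crypto API of
this release. dev_id and the key buffer are placeholders supplied by the
application; session-less operation is not supported by QAT.

/*
 * Hypothetical helper, for illustration only: creates a KASUMI F8
 * cipher session on the given crypto device.
 */
#include <string.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
create_kasumi_f8_session(uint8_t dev_id, uint8_t *key)
{
	struct rte_crypto_sym_xform cipher_xform;

	memset(&cipher_xform, 0, sizeof(cipher_xform));
	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.next = NULL;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
	cipher_xform.cipher.key.data = key;
	cipher_xform.cipher.key.length = 16; /* only 128-bit keys accepted */

	/* Cipher length/offset set later in the op must be byte-aligned. */
	return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
}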
 doc/guides/cryptodevs/qat.rst                    |  5 +--
 drivers/crypto/qat/qat_adf/qat_algs.h            |  3 +-
 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 44 +++++++++++++++++++++---
 drivers/crypto/qat/qat_crypto.c                  | 39 +++++++++++++++++++--
 4 files changed, 81 insertions(+), 10 deletions(-)
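Also for reviewers: a standalone illustration (not part of the patch) of the
key material that the qat_algs_build_desc.c change lays out for KASUMI F8,
i.e. the raw cipher key followed by a copy whose 32-bit words are XORed with
the F8 key modifier. The destination buffer and key length are illustrative;
in the driver this is written into the content descriptor.

#include <stdint.h>
#include <string.h>

#define KASUMI_F8_KEY_MODIFIER_4_BYTES	0x55555555

/* Write <key><key XOR modifier> into dst, as the descriptor builder does. */
static void
kasumi_f8_key_layout(uint8_t *dst, const uint8_t *key, uint32_t keylen)
{
	uint32_t temp_key[4];	/* KASUMI F8 keys are 16 bytes */
	uint32_t wordIndex;

	memcpy(dst, key, keylen);		/* plain key */
	memcpy(temp_key, key, keylen);
	for (wordIndex = 0; wordIndex < (keylen >> 2); wordIndex++)
		temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
	memcpy(dst + keylen, temp_key, keylen);	/* modified key */
}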

diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 6b73d95..0502483 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -51,6 +51,7 @@ Cipher algorithms:
 * ``RTE_CRYPTO_SYM_CIPHER_SNOW3G_UEA2``
 * ``RTE_CRYPTO_CIPHER_AES_GCM``
 * ``RTE_CRYPTO_CIPHER_NULL``
+* ``RTE_CRYPTO_CIPHER_KASUMI_F8``
 
 Hash algorithms:
 
@@ -70,10 +71,10 @@ Limitations
 
 * Chained mbufs are not supported.
 * Hash only is not supported except Snow3G UIA2 and KASUMI F9.
-* Cipher only is not supported except Snow3G UEA2.
+* Cipher only is not supported except Snow3G UEA2 and KASUMI F8.
 * Only supports the session-oriented API implementation (session-less APIs are not supported).
 * Not performance tuned.
-* Snow3g(UEA2) supported only if cipher length, cipher offset fields are byte-aligned.
+* Snow3g(UEA2) and KASUMI(F8) supported only if cipher length, cipher offset fields are byte-aligned.
 * Snow3g(UIA2) and KASUMI(F9) supported only if hash length, hash offset fields are byte-aligned.
 * No BSD support as BSD QAT kernel driver not available.
 
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 0cc176f..fad8471 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -57,6 +57,7 @@
  */
 #define KASUMI_F9_KEY_MODIFIER_4_BYTES   0xAAAAAAAA
 
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES   0x55555555
 
 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
@@ -137,5 +138,5 @@ void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
 
 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
 #endif
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 085a652..9d1df56 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -457,7 +457,8 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 	uint32_t total_key_size;
 	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
 	uint16_t cipher_offset, cd_size;
-
+	uint32_t wordIndex  = 0;
+	uint32_t *temp_key = NULL;
 	PMD_INIT_FUNC_TRACE();
 
 	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -507,6 +508,11 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
 		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
+		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
+		cipher_cd_ctrl->cipher_padding_sz =
+					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -524,9 +530,27 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
 					cdesc->qat_cipher_alg, key_convert,
 					cdesc->qat_dir);
-	memcpy(cipher->aes.key, cipherkey, cipherkeylen);
-	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
-			cipherkeylen;
+
+	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
+					sizeof(struct icp_qat_hw_cipher_config)
+					+ cipherkeylen);
+		memcpy(cipher->aes.key, cipherkey, cipherkeylen);
+		memcpy(temp_key, cipherkey, cipherkeylen);
+
+		/* XOR key with KASUMI F8 key modifier at 4-byte granularity */
+		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
+								wordIndex++)
+			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
+
+		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+					cipherkeylen + cipherkeylen;
+	} else {
+		memcpy(cipher->aes.key, cipherkey, cipherkeylen);
+		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+					cipherkeylen;
+	}
+
 	if (total_key_size > cipherkeylen) {
 		uint32_t padding_size =  total_key_size-cipherkeylen;
 
@@ -859,3 +883,15 @@ int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	}
 	return 0;
 }
+
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+	switch (key_len) {
+	case ICP_QAT_HW_KASUMI_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 1de95f1..1282312 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -324,6 +324,31 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* KASUMI (F9) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 4,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ -449,11 +474,18 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_CIPHER_NULL:
 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
 		break;
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
+					&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+		break;
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_CCM:
-	case RTE_CRYPTO_CIPHER_KASUMI_F8:
 		PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
 				cipher_xform->algo);
 		goto error_out;
@@ -796,11 +828,12 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 
 	cipher_param->cipher_length = op->sym->cipher.data.length;
 	cipher_param->cipher_offset = op->sym->cipher.data.offset;
-	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
 		if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
 				(cipher_param->cipher_offset
 					% BYTE_LENGTH != 0))) {
-			PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
+			PMD_DRV_LOG(ERR, " For Snow3g/Kasumi, QAT PMD only "
 				"supports byte aligned values");
 			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			return -EINVAL;
-- 
2.5.5


