[dpdk-dev] [PATCH 2/3] crypto/qat: add SGL capability to Intel QuickAssist driver

Arek Kusztal arkadiuszx.kusztal@intel.com
Wed Nov 30 16:52:14 CET 2016


This commit adds scatter-gather list (SGL) capability to the Intel
QuickAssist Technology driver.

Signed-off-by: Arek Kusztal <arkadiuszx.kusztal@intel.com>
---
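A note on the SGL cookie layout (below the fold, not part of the commit
message): each element taken from the queue pair's sgl_pool is sized to
hold two flat-buffer lists back to back, the source list at offset 0
and, for out-of-place operations, the destination list at
SGL_2ND_COOKIE_OFF bytes in. A minimal sketch of how the second list is
located, assuming the qat_alg_buf and qat_alg_buf_list definitions from
qat_algs.h; the helper name is illustrative only and not part of the
diff:

	#include <stdint.h>
	#include "qat_algs.h"	/* struct qat_alg_buf{,_list} */
	#include "qat_crypto.h"	/* QAT_SGL_MAX_NUMBER */

	/* Hypothetical helper: address of the destination (second)
	 * buffer list inside one SGL cookie element.
	 */
	static inline struct qat_alg_buf_list *
	sgl_cookie_dst_list(void *cookie)
	{
		return (struct qat_alg_buf_list *)((uint8_t *)cookie +
			sizeof(struct qat_alg_buf_list) +
			QAT_SGL_MAX_NUMBER * sizeof(struct qat_alg_buf));
	}
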
 doc/guides/rel_notes/release_17_02.rst |   5 ++
 drivers/crypto/qat/qat_crypto.c        | 145 ++++++++++++++++++++++++++++++---
 drivers/crypto/qat/qat_crypto.h        |   6 ++
 drivers/crypto/qat/qat_qp.c            |  28 +++++++
 4 files changed, 173 insertions(+), 11 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 3b65038..873333b 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -38,6 +38,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Updated the QAT PMD.**
+
+  The QAT PMD was updated with additional support for:
+
+  * Scatter-gather list (SGL) operations.
 
 Resolved Issues
 ---------------
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index afce4ac..fa3c277 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -67,6 +67,13 @@
 
 #define BYTE_LENGTH    8
 
+/*
+ * Each SGL cookie holds two qat_alg_buf_list structures; the second
+ * (destination) list starts SGL_2ND_COOKIE_OFF bytes into the cookie.
+ */
+#define SGL_2ND_COOKIE_OFF	(QAT_SGL_MAX_NUMBER \
+				* sizeof(struct qat_alg_buf) \
+				+ sizeof(struct qat_alg_buf_list))
+
+#define SGL_2ND_COOKIE_ADDR(arg, cast)	((cast)(arg) \
+				+ SGL_2ND_COOKIE_OFF)
+
 static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 	{	/* SHA1 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -503,7 +510,8 @@ static inline uint32_t
 adf_modulo(uint32_t data, uint32_t shift);
 
 static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op,
+		uint8_t *out_msg, struct qat_qp *qp);
 
 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
 		void *session)
@@ -839,7 +847,6 @@ unsigned qat_crypto_sym_get_session_private_size(
 	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
 }
 
-
 uint16_t
 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
@@ -873,9 +880,16 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 	}
 
 	while (nb_ops_sent != nb_ops_possible) {
-		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
+		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
+				tmp_qp);
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
+			/*
+			 * This message cannot be enqueued;
+			 * decrease the inflight count by the
+			 * number of ops that were not sent.
+			 */
+			rte_atomic16_sub(&tmp_qp->inflights16,
+					nb_ops_possible - nb_ops_sent);
 			if (nb_ops_sent == 0)
 				return 0;
 			goto kick_tail;
@@ -884,6 +898,7 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
 		nb_ops_sent++;
 		cur_op++;
 	}
 kick_tail:
 	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -914,7 +929,7 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
 		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
-				sizeof(struct icp_qat_fw_comn_resp));
+			sizeof(struct icp_qat_fw_comn_resp));
 #endif
 		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
 				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
@@ -923,6 +938,15 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		} else {
 			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		}
+		/*
+		 * A non-NULL _priv on a symmetric op means an SGL
+		 * cookie is attached; return it to the queue pair's
+		 * SGL mempool.
+		 */
+		if (rx_op->_priv) {
+			rte_mempool_put(tmp_qp->sgl_pool, rx_op->_priv);
+			rx_op->_priv = NULL;
+		}
+
 		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
 		queue->head = adf_modulo(queue->head +
 				queue->msg_size,
@@ -945,8 +969,51 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 }
 
 static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
+		void *sgl_cookie, int32_t total_pck_len)
 {
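+	/*
+	 * Walk the mbuf chain and fill the flat-buffer array consumed
+	 * by the QAT hardware. Entry 0 is overwritten after the loop so
+	 * that the first buffer starts at buff_start rather than at the
+	 * head of the first mbuf.
+	 */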
+	int nr = 0;
+	struct qat_alg_buf_list *list = sgl_cookie;
+
+	/*
+	 * total_len starts at the (non-positive) offset of the chain
+	 * head from buff_start so the running total measures data from
+	 * buff_start onwards.
+	 */
+	int32_t total_len = rte_pktmbuf_mtophys(buf) - buff_start;
+	uint32_t first_buf_len = rte_pktmbuf_mtophys(buf) -
+			buff_start + rte_pktmbuf_data_len(buf);
+
+	while (buf) {
+		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+			PMD_DRV_LOG(ERR, "QAT PMD exceeded maximum"
+					" number of SGL entries (%u)",
+					QAT_SGL_MAX_NUMBER);
+			return -EINVAL;
+		}
+
+		list->bufers[nr].len = rte_pktmbuf_data_len(buf);
+		list->bufers[nr].resrvd = 0;
+		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+
+		/* account for this segment before nr is advanced */
+		total_len += list->bufers[nr].len;
+
+		++nr;
+		buf = buf->next;
+
+		if (total_len >= total_pck_len)
+			buf = NULL;
+	}
+
+	list->bufers[0].addr = buff_start;
+	list->bufers[0].len = first_buf_len;
+
+	list->num_bufs = nr;
+
+	return 0;
+}
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+		struct qat_qp *qp)
+{
+	int ret = 0;
 	struct qat_session *ctx;
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -956,6 +1023,9 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	uint32_t auth_len = 0, auth_ofs = 0;
 	uint32_t min_ofs = 0;
 	uint64_t src_buf_start = 0, dst_buf_start = 0;
+	void *cookie_virt_addr = NULL;
+	phys_addr_t cookie_phys_addr = 0;
+	uint8_t do_sgl = 0;
 
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
@@ -1073,10 +1143,25 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 
 	}
 
+	/* A chained source or destination mbuf requires the SGL path */
+	if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+		do_sgl = 1;
+
+	if (do_sgl) {
+		if (rte_mempool_get(qp->sgl_pool, &cookie_virt_addr)) {
+			PMD_DRV_LOG(ERR, "QAT PMD cannot get SGL cookie");
+			return -EFAULT;
+		}
+		cookie_phys_addr = rte_mempool_virt2phy(qp->sgl_pool,
+				cookie_virt_addr);
+	}
+
 	/* adjust for chain case */
 	if (do_cipher && do_auth)
 		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
 
+	/*
+	 * An offset past the first segment cannot be used to adjust the
+	 * DMA start address, so for SGL start from the head of the chain.
+	 */
+	if (unlikely(do_sgl &&
+			min_ofs >= rte_pktmbuf_data_len(op->sym->m_src)))
+		min_ofs = 0;
+
 	if (unlikely(op->sym->m_dst != NULL)) {
 		/* Out-of-place operation (OOP)
 		 * Don't align DMA start. DMA the minimum data-set
@@ -1086,6 +1171,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
 		dst_buf_start =
 			rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
 	} else {
 		/* In-place operation
 		 * Start DMA at nearest aligned address below min_ofs
@@ -1131,8 +1217,49 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 		(cipher_param->cipher_offset + cipher_param->cipher_length)
 		: (auth_param->auth_off + auth_param->auth_len);
 
-	qat_req->comn_mid.src_data_addr = src_buf_start;
-	qat_req->comn_mid.dest_data_addr = dst_buf_start;
+	if (do_sgl) {
+		op->_priv = cookie_virt_addr;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+				QAT_COMN_PTR_TYPE_SGL);
+		ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
+				cookie_virt_addr, qat_req->comn_mid.src_length);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "QAT PMD cannot fill SGL array");
+			rte_mempool_put(qp->sgl_pool, cookie_virt_addr);
+			op->_priv = NULL;
+			return ret;
+		}
+
+		if (likely(op->sym->m_dst == NULL)) {
+			/* In-place: one SGL describes both directions */
+			qat_req->comn_mid.dest_data_addr =
+				qat_req->comn_mid.src_data_addr =
+				cookie_phys_addr;
+		} else {
+			ret = qat_sgl_fill_array(op->sym->m_dst,
+					dst_buf_start,
+					SGL_2ND_COOKIE_ADDR(cookie_virt_addr,
+							uint8_t *),
+					qat_req->comn_mid.dst_length);
+			if (ret) {
+				PMD_DRV_LOG(ERR,
+					"QAT PMD cannot fill SGL array");
+				rte_mempool_put(qp->sgl_pool,
+						cookie_virt_addr);
+				op->_priv = NULL;
+				return ret;
+			}
+
+			qat_req->comn_mid.src_data_addr = cookie_phys_addr;
+			qat_req->comn_mid.dest_data_addr =
+				SGL_2ND_COOKIE_ADDR(cookie_phys_addr,
+						phys_addr_t);
+		}
+	} else {
+		/* Make sure a stale _priv is not mistaken for an
+		 * SGL cookie at dequeue time.
+		 */
+		op->_priv = NULL;
+		qat_req->comn_mid.src_data_addr = src_buf_start;
+		qat_req->comn_mid.dest_data_addr = dst_buf_start;
+	}
 
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -1164,13 +1291,9 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 		}
 	}
 
-
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
 	rte_hexdump(stdout, "qat_req:", qat_req,
 			sizeof(struct icp_qat_fw_la_bulk_req));
-	rte_hexdump(stdout, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
 	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
 			op->sym->cipher.iv.length);
 	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6b84488..8612706 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -45,6 +45,11 @@
 	(((num) + (align) - 1) & ~((align) - 1))
 #define QAT_64_BTYE_ALIGN_MASK (~0x3f)
 
+/* Maximum number of SGL entries */
+#define QAT_SGL_MAX_NUMBER	16
+
 /**
  * Structure associated with each queue.
  */
@@ -69,6 +74,7 @@ struct qat_qp {
 	struct	qat_queue	tx_q;
 	struct	qat_queue	rx_q;
 	struct	rte_cryptodev_stats stats;
+	struct	rte_mempool	*sgl_pool;
 } __rte_cache_aligned;
 
 /** private data structure for each QAT device */
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 2e7188b..27c5f9a 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -41,6 +41,7 @@
 
 #include "qat_logs.h"
 #include "qat_crypto.h"
+#include "qat_algs.h"
 #include "adf_transport_access_macros.h"
 
 #define ADF_MAX_SYM_DESC			4096
@@ -136,6 +137,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 {
 	struct qat_qp *qp;
 	int ret;
+	char sgl_pool_name[RTE_RING_NAMESIZE];
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -191,9 +193,31 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 		qat_queue_delete(&(qp->tx_q));
 		goto create_err;
 	}
+
 	adf_configure_queues(qp);
 	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+
+	snprintf(sgl_pool_name, RTE_RING_NAMESIZE, "%s_qp_sgl_%d_%hu",
+		dev->driver->pci_drv.driver.name, dev->data->dev_id,
+		queue_pair_id);
+
+	qp->sgl_pool = rte_mempool_lookup(sgl_pool_name);
+
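+	/*
+	 * Reuse an existing pool if the queue pair is being re-created.
+	 * Each element holds two flat-buffer lists (source and
+	 * destination SGL), hence the doubled element size below.
+	 */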
+	if (qp->sgl_pool == NULL) {
+		qp->sgl_pool = rte_mempool_create(sgl_pool_name,
+				qp->tx_q.max_inflights,
+				(sizeof(struct qat_alg_buf_list) +
+				sizeof(struct qat_alg_buf) *
+				QAT_SGL_MAX_NUMBER) * 2, 0, 0,
+				NULL, NULL, NULL, NULL, socket_id, 0);
+	}
+	if (qp->sgl_pool == NULL) {
+		PMD_DRV_LOG(ERR, "QAT PMD cannot create SGL mempool");
+		goto create_err;
+	}
+
 	dev->data->queue_pairs[queue_pair_id] = qp;
+
 	return 0;
 
 create_err:
@@ -221,6 +245,10 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 	}
 
 	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+
+	if (qp->sgl_pool)
+		rte_mempool_free(qp->sgl_pool);
+
 	rte_free(qp);
 	dev->data->queue_pairs[queue_pair_id] = NULL;
 	return 0;
-- 
2.1.0