[v2,1/2] common/qat: add sgl header

Message ID 1532351135-10064-1-git-send-email-fiona.trahe@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Pablo de Lara Guarch
Series [v2,1/2] common/qat: add sgl header

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Fiona Trahe July 23, 2018, 1:05 p.m. UTC
  This patch refactors the sgl struct so that it includes a flexible
array of flat buffers, as the sym and compress PMDs can have
different-sized SGLs.

Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
v2 changes:
 - changed max_segs from int32_t to uint16_t to match the max allowed by mbuf
 - declared the loop index outside the for statement to work with all compilers

 drivers/common/qat/qat_common.c | 57 +++++++++++++++++++++++++++++++----------
 drivers/common/qat/qat_common.h | 23 +++++++++--------
 drivers/crypto/qat/qat_sym.c    | 12 +++++----
 drivers/crypto/qat/qat_sym.h    | 14 ++++++++--
 4 files changed, 75 insertions(+), 31 deletions(-)
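
The refactor moves the common header fields (resrvd, num_bufs, num_mapped_bufs)
into the qat_sgl_hdr macro and leaves struct qat_sgl with only a flexible array,
so each service can declare its own fixed-size variant and pass its own limit to
qat_sgl_fill_array(). A minimal sketch of that pattern follows, assuming a
hypothetical compression-side struct and limit (the real compress PMD
definitions come with patch 2/2 and are not shown here):

#define QAT_COMP_SGL_MAX_NUMBER	32	/* hypothetical per-service limit */

struct qat_comp_sgl {
	qat_sgl_hdr;	/* resrvd, num_bufs, num_mapped_bufs from qat_common.h */
	struct qat_flat_buf buffers[QAT_COMP_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;

/* Hypothetical call site: fill the list from an mbuf chain while building
 * a request. The list is passed as void *, and the callee writes only the
 * common header plus at most max_segs buffers[] entries.
 */
static int
qat_comp_fill_sgl(struct rte_mbuf *m, uint64_t buf_start, uint32_t data_len,
		struct qat_comp_sgl *sgl)
{
	return qat_sgl_fill_array(m, buf_start, sgl, data_len,
			QAT_COMP_SGL_MAX_NUMBER);
}

Because buffers[] sits immediately after the shared header in both layouts, the
generic struct qat_sgl view used inside qat_sgl_fill_array() lines up with any
service-specific struct built from qat_sgl_hdr.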
  

Comments

De Lara Guarch, Pablo July 23, 2018, 6:10 p.m. UTC | #1
> -----Original Message-----
> From: Trahe, Fiona
> Sent: Monday, July 23, 2018 2:06 PM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Trahe, Fiona
> <fiona.trahe@intel.com>; Jozwiak, TomaszX <tomaszx.jozwiak@intel.com>
> Subject: [PATCH v2 1/2] common/qat: add sgl header
> 
> This patch refactors the sgl struct so it includes a flexible array of flat buffers as
> sym and compress PMDs can have different size sgls.
> 
> Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
> Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>

Applied to dpdk-next-crypto.
Thanks,

Pablo
  

Patch

diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c
index c206d3b..81a99c1 100644
--- a/drivers/common/qat/qat_common.c
+++ b/drivers/common/qat/qat_common.c
@@ -8,40 +8,53 @@ 
 
 int
 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
-		struct qat_sgl *list, uint32_t data_len)
+		void *list_in, uint32_t data_len,
+		const uint16_t max_segs)
 {
 	int nr = 1;
-
-	uint32_t buf_len = rte_pktmbuf_iova(buf) -
-			buf_start + rte_pktmbuf_data_len(buf);
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	/* buf_start allows the first buffer to start at an address before or
+	 * after the mbuf data start. It's used to either optimally align the
+	 * dma to 64 or to start dma from an offset.
+	 */
+	uint32_t buf_len;
+	uint32_t first_buf_len = rte_pktmbuf_data_len(buf) +
+			(rte_pktmbuf_mtophys(buf) - buf_start);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint8_t *virt_addr[max_segs];
+	virt_addr[0] = rte_pktmbuf_mtod(buf, uint8_t*) +
+			(rte_pktmbuf_mtophys(buf) - buf_start);
+#endif
 
 	list->buffers[0].addr = buf_start;
 	list->buffers[0].resrvd = 0;
-	list->buffers[0].len = buf_len;
+	list->buffers[0].len = first_buf_len;
 
-	if (data_len <= buf_len) {
+	if (data_len <= first_buf_len) {
 		list->num_bufs = nr;
 		list->buffers[0].len = data_len;
-		return 0;
+		goto sgl_end;
 	}
 
 	buf = buf->next;
+	buf_len = first_buf_len;
 	while (buf) {
-		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
-			QAT_LOG(ERR,
-				"QAT PMD exceeded size of QAT SGL entry(%u)",
-					QAT_SGL_MAX_NUMBER);
+		if (unlikely(nr == max_segs)) {
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+					max_segs);
 			return -EINVAL;
 		}
 
 		list->buffers[nr].len = rte_pktmbuf_data_len(buf);
 		list->buffers[nr].resrvd = 0;
-		list->buffers[nr].addr = rte_pktmbuf_iova(buf);
-
+		list->buffers[nr].addr = rte_pktmbuf_mtophys(buf);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		virt_addr[nr] = rte_pktmbuf_mtod(buf, uint8_t*);
+#endif
 		buf_len += list->buffers[nr].len;
 		buf = buf->next;
 
-		if (buf_len > data_len) {
+		if (buf_len >= data_len) {
 			list->buffers[nr].len -=
 				buf_len - data_len;
 			buf = NULL;
@@ -50,6 +63,22 @@  qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
 	}
 	list->num_bufs = nr;
 
+sgl_end:
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	{
+		uint16_t i;
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (i = 0; i < list->num_bufs; i++) {
+			QAT_DP_LOG(INFO,
+				"QAT SGL buf %d, len = %d, iova = 0x%012lx",
+				i, list->buffers[i].len,
+				list->buffers[i].addr);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+					virt_addr[i], list->buffers[i].len);
+		}
+	}
+#endif
+
 	return 0;
 }
 
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index db85d54..b26aa26 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -10,11 +10,6 @@ 
 
 /**< Intel(R) QAT device name for PCI registration */
 #define QAT_PCI_NAME	qat
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER	16
-
 #define QAT_64_BTYE_ALIGN_MASK (~0x3f)
 
 /* Intel(R) QuickAssist Technology device generation is enumerated
@@ -31,6 +26,7 @@  enum qat_service_type {
 	QAT_SERVICE_COMPRESSION,
 	QAT_SERVICE_INVALID
 };
+
 #define QAT_MAX_SERVICES		(QAT_SERVICE_INVALID)
 
 /**< Common struct for scatter-gather list operations */
@@ -40,11 +36,17 @@  struct qat_flat_buf {
 	uint64_t addr;
 } __rte_packed;
 
+#define qat_sgl_hdr  struct { \
+	uint64_t resrvd; \
+	uint32_t num_bufs; \
+	uint32_t num_mapped_bufs; \
+}
+
+__extension__
 struct qat_sgl {
-	uint64_t resrvd;
-	uint32_t num_bufs;
-	uint32_t num_mapped_bufs;
-	struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER];
+	qat_sgl_hdr;
+	/* flexible array of flat buffers*/
+	struct qat_flat_buf buffers[0];
 } __rte_packed __rte_cache_aligned;
 
 /** Common, i.e. not service-specific, statistics */
@@ -64,7 +66,8 @@  struct qat_pci_device;
 
 int
 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
-		struct qat_sgl *list, uint32_t data_len);
+		void *list_in, uint32_t data_len,
+		const uint16_t max_segs);
 void
 qat_stats_get(struct qat_pci_device *dev,
 		struct qat_common_stats *stats,
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 4ed7d95..8273968 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -495,8 +495,9 @@  qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
 				QAT_COMN_PTR_TYPE_SGL);
 		ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
-				&cookie->qat_sgl_src,
-				qat_req->comn_mid.src_length);
+					&cookie->qat_sgl_src,
+					qat_req->comn_mid.src_length,
+					QAT_SYM_SGL_MAX_NUMBER);
 
 		if (unlikely(ret)) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
@@ -509,9 +510,10 @@  qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				cookie->qat_sgl_src_phys_addr;
 		else {
 			ret = qat_sgl_fill_array(op->sym->m_dst,
-					dst_buf_start,
-					&cookie->qat_sgl_dst,
-						qat_req->comn_mid.dst_length);
+						 dst_buf_start,
+						 &cookie->qat_sgl_dst,
+						 qat_req->comn_mid.dst_length,
+						 QAT_SYM_SGL_MAX_NUMBER);
 
 			if (unlikely(ret)) {
 				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e4e1ae8..bc6426c 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -21,11 +21,21 @@ 
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER	16
+
 struct qat_sym_session;
 
+struct qat_sym_sgl {
+	qat_sgl_hdr;
+	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
 struct qat_sym_op_cookie {
-	struct qat_sgl qat_sgl_src;
-	struct qat_sgl qat_sgl_dst;
+	struct qat_sym_sgl qat_sgl_src;
+	struct qat_sym_sgl qat_sgl_dst;
 	phys_addr_t qat_sgl_src_phys_addr;
 	phys_addr_t qat_sgl_dst_phys_addr;
 };