@@ -513,6 +513,13 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
+# Compile PMD for IXGBE inline IPsec device
+#
+CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO=n
+CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO_DEBUG=n
+
+
+#
# Compile generic event device library
#
CONFIG_RTE_LIBRTE_EVENTDEV=y
@@ -55,5 +55,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
DEPDIRS-null = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
DEPDIRS-dpaa2_sec = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += ixgbe
+DEPDIRS-ixgbe = $(core-libs)
include $(RTE_SDK)/mk/rte.subdir.mk
new file mode 100644
@@ -0,0 +1,63 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
+# library name
+LIB = librte_pmd_ixgbe_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/net/ixgbe/
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ixgbe_crypto_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += ixgbe_crypto_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += ixgbe_rte_cryptodev.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
new file mode 100644
@@ -0,0 +1,576 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_ip.h>
+
+#include "../ixgbe/ixgbe_crypto_pmd_private.h"
+
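+/*
+ * The PMD advertises AES-GCM as an auth and as a cipher transform, plus
+ * the new IPsec transform type. The actual encryption/authentication is
+ * performed by the NIC's inline crypto engine, not by this driver.
+ */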
+static const struct rte_cryptodev_capabilities
+	ixgbe_crypto_pmd_capabilities[] = {
+ { /* AES GCM (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 16
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 16
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+	{ /* Inline IPsec (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_IPSEC,
+ }, }
+ },
+	{ /* Inline IPsec (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_IPSEC,
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+ixgbe_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+ixgbe_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+ixgbe_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+ixgbe_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Get device statistics */
+static void
+ixgbe_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ixgbe_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+ixgbe_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ixgbe_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+ixgbe_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct ixgbe_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = ixgbe_crypto_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+ixgbe_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+ixgbe_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct ixgbe_crypto_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "inln_crypto_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+ixgbe_crypto_pmd_qp_create_processed_pkts_ring(struct ixgbe_crypto_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ IXGBE_CRYPTO_LOG_INFO(
+ "Reusing existing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+
+ IXGBE_CRYPTO_LOG_INFO(
+ "Unable to reuse existing ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+ixgbe_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct ixgbe_crypto_private *internals = dev->data->dev_private;
+ struct ixgbe_crypto_qp *qp;
+ int retval;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ IXGBE_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum "
+ "number of queue pairs supported (%u).",
+ qp_id, internals->max_nb_qpairs);
+		return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ ixgbe_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ IXGBE_CRYPTO_LOG_ERR("Failed to allocate queue pair memory");
+		return -ENOMEM;
+ }
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = ixgbe_crypto_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ IXGBE_CRYPTO_LOG_ERR("Failed to create unique name for ixgbe inline "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = ixgbe_crypto_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ IXGBE_CRYPTO_LOG_ERR("Failed to create unique name for ixgbe inline "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+	rte_free(qp);
+	dev->data->queue_pairs[qp_id] = NULL;
+
+	return -1;
+}
+
+/** Start queue pair */
+static int
+ixgbe_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+ixgbe_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+ixgbe_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Return the size of the inline crypto session structure */
+static unsigned
+ixgbe_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct ixgbe_crypto_session);
+}
+
+/** Configure an IXGBE crypto session from a crypto xform chain */
+static void *
+ixgbe_crypto_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ int retval;
+
+ if (unlikely(sess == NULL)) {
+ IXGBE_CRYPTO_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+	retval = ixgbe_crypto_set_session_parameters(
+			(struct ixgbe_crypto_session *)sess, xform);
+	if (retval != 0) {
+		IXGBE_CRYPTO_LOG_ERR("failed to configure session parameters");
+		return NULL;
+	}
+
+	retval = crypto_ixgbe_add_sa(dev, sess);
+	if (retval < 0) {
+		IXGBE_CRYPTO_LOG_ERR("failed to add crypto session");
+		return NULL;
+	}
+	((struct ixgbe_crypto_session *)sess)->sa_index = retval;
+
+	return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+ixgbe_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
+ void *sess)
+{
+ if (sess)
+ memset(sess, 0, sizeof(struct ixgbe_crypto_session));
+}
+
+
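+/*
+ * A session is built from exactly two chained xforms: an AES-GCM cipher
+ * or auth xform carrying the key, chained (in either order) with an
+ * IPsec xform carrying the SA parameters. A minimal sketch of an
+ * outbound chain, with placeholder key/SA values:
+ *
+ *	struct rte_crypto_sym_xform ipsec_xf = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_IPSEC,
+ *		.ipsec = {
+ *			.dir = RTE_CRYPTO_OUTBOUND,
+ *			.spi = spi,
+ *			.salt = salt,
+ *			.src_ip = src,
+ *			.dst_ip = dst,
+ *		},
+ *	};
+ *	struct rte_crypto_sym_xform cipher_xf = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.cipher = {
+ *			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ *			.key = { .data = key, .length = 16 },
+ *		},
+ *		.next = &ipsec_xf,
+ *	};
+ */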
+/** verify and set session parameters */
+int
+ixgbe_crypto_set_session_parameters(
+ struct ixgbe_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *ipsec_xform;
+
+	if (xform->next == NULL || xform->next->next != NULL) {
+		IXGBE_CRYPTO_LOG_ERR("Exactly two chained xforms required");
+ return -EINVAL;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_IPSEC) {
+ cipher_xform = xform;
+ ipsec_xform = xform->next;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_IPSEC) {
+ auth_xform = xform;
+ ipsec_xform = xform->next;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_IPSEC &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ ipsec_xform = xform;
+ cipher_xform = xform->next;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_IPSEC &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ ipsec_xform = xform;
+ auth_xform = xform->next;
+ } else {
+ IXGBE_CRYPTO_LOG_ERR("Cipher or auth xform and ipsec xform required");
+ return -EINVAL;
+ }
+
+	if ((cipher_xform &&
+			cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM) ||
+			(auth_xform &&
+			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)) {
+		IXGBE_CRYPTO_LOG_ERR("Only AES GCM supported");
+		return -EINVAL;
+	}
+
+	/* Select Crypto operation */
+	if (ipsec_xform->ipsec.dir == RTE_CRYPTO_OUTBOUND) {
+		sess->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
+	} else if (ipsec_xform->ipsec.dir == RTE_CRYPTO_INBOUND) {
+		sess->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
+	} else {
+		IXGBE_CRYPTO_LOG_ERR("Invalid operation");
+		return -EINVAL;
+	}
+
+ if (cipher_xform != NULL) {
+ sess->enc = 1;
+ sess->key = cipher_xform->cipher.key.data;
+ } else {
+ sess->enc = 0;
+ sess->key = auth_xform->auth.key.data;
+ }
+
+ sess->salt = ipsec_xform->ipsec.salt;
+ sess->dir = ipsec_xform->ipsec.dir;
+ sess->dst_ip = ipsec_xform->ipsec.dst_ip;
+ sess->src_ip = ipsec_xform->ipsec.src_ip;
+ sess->spi = ipsec_xform->ipsec.spi;
+
+ return 0;
+}
+
+
+
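+/*
+ * No crypto is performed here: for inbound packets the hardware has
+ * already decrypted the payload during Rx, so this only maps the mbuf
+ * ol_flags onto an op status (and restores the packet length from the
+ * IP header); for outbound packets it tags the mbuf with the SA index,
+ * pad length and encrypt flag consumed by the Tx descriptor path.
+ */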
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct ixgbe_crypto_qp *qp, struct rte_crypto_op *op,
+ struct ixgbe_crypto_session *sess)
+{
+	if (sess->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		if (op->sym->m_src->ol_flags &
+				PKT_RX_IPSEC_INLINE_CRYPTO_AUTH_FAILED) {
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else if (!(op->sym->m_src->ol_flags &
+				PKT_RX_IPSEC_INLINE_CRYPTO)) {
+			op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		} else {
+			struct rte_mbuf *buff = op->sym->m_dst ?
+					op->sym->m_dst : op->sym->m_src;
+			struct ipv4_hdr *ip4 = rte_pktmbuf_mtod(buff,
+					struct ipv4_hdr *);
+			uint16_t plen = rte_pktmbuf_pkt_len(buff);
+			uint16_t iplen;
+
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+			/* FIXME: why is the buffer coming 4 bytes short? */
+			if ((ip4->version_ihl & 0xf0) == 0x40) {
+				iplen = rte_be_to_cpu_16(ip4->total_length);
+			} else {
+				struct ipv6_hdr *ip6 = (struct ipv6_hdr *)ip4;
+
+				iplen = rte_be_to_cpu_16(ip6->payload_len) +
+						sizeof(struct ipv6_hdr);
+			}
+			if (iplen > plen)
+				rte_pktmbuf_append(buff, iplen - plen);
+		}
+	} else {
+		struct rte_mbuf *buff = op->sym->m_dst ?
+				op->sym->m_dst : op->sym->m_src;
+		/*
+		 * The ESP pad-length byte sits just before the next-header
+		 * byte and the 16-byte ICV, i.e. 18 bytes from the end of
+		 * the packet.
+		 */
+		uint8_t *pad_len = rte_pktmbuf_mtod_offset(buff, uint8_t *,
+				rte_pktmbuf_pkt_len(buff) - 18);
+
+		/* set status as successful by default */
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+		buff->ol_flags |= PKT_TX_IPSEC_INLINE_CRYPTO;
+		buff->inline_ipsec.enc = sess->enc;
+		buff->inline_ipsec.sa_idx = sess->sa_index;
+		buff->inline_ipsec.pad_len = *pad_len;
+ }
+
+ /*
+ * if crypto session and operation are valid just enqueue the packet
+ * in the processed ring
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static struct ixgbe_crypto_session *
+get_session(struct ixgbe_crypto_qp *qp, struct rte_crypto_sym_op *op)
+{
+ struct ixgbe_crypto_session *sess;
+
+ if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->session == NULL ||
+ op->session->dev_type != RTE_CRYPTODEV_IXGBE_PMD))
+ return NULL;
+
+ sess = (struct ixgbe_crypto_session *)op->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct ixgbe_crypto_session *)c_sess->_private;
+
+		if (ixgbe_crypto_set_session_parameters(sess,
+				op->xform) != 0) {
+			/* return the session object to its mempool on error */
+			rte_mempool_put(qp->sess_mp, (void *)c_sess);
+			return NULL;
+		}
+ }
+
+ return sess;
+}
+
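+/*
+ * The burst functions implement a completion-queue model: enqueue
+ * validates the session, tags or checks the mbuf, and pushes the op
+ * onto the processed-packets ring; dequeue simply pops completed ops
+ * from that ring.
+ */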
+/** Enqueue burst */
+uint16_t
+ixgbe_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ixgbe_crypto_session *sess;
+ struct ixgbe_crypto_qp *qp = queue_pair;
+
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]->sym);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->qp_stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ if (ops[i])
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->qp_stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+uint16_t
+ixgbe_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ixgbe_crypto_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+struct rte_cryptodev_ops ixgbe_crypto_pmd_ops = {
+ .dev_configure = ixgbe_crypto_pmd_config,
+ .dev_start = ixgbe_crypto_pmd_start,
+ .dev_stop = ixgbe_crypto_pmd_stop,
+ .dev_close = ixgbe_crypto_pmd_close,
+
+ .stats_get = ixgbe_crypto_pmd_stats_get,
+ .stats_reset = ixgbe_crypto_pmd_stats_reset,
+
+ .dev_infos_get = ixgbe_crypto_pmd_info_get,
+
+ .queue_pair_setup = ixgbe_crypto_pmd_qp_setup,
+ .queue_pair_release = ixgbe_crypto_pmd_qp_release,
+ .queue_pair_start = ixgbe_crypto_pmd_qp_start,
+ .queue_pair_stop = ixgbe_crypto_pmd_qp_stop,
+ .queue_pair_count = ixgbe_crypto_pmd_qp_count,
+
+ .session_get_size = ixgbe_crypto_pmd_session_get_size,
+ .session_configure = ixgbe_crypto_pmd_session_configure,
+ .session_clear = ixgbe_crypto_pmd_session_clear
+};
new file mode 100644
@@ -0,0 +1,180 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_CRYPTO_PMD_PRIVATE_H_
+#define _IXGBE_CRYPTO_PMD_PRIVATE_H_
+
+#include "rte_config.h"
+#include "rte_ethdev.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_api.h"
+#include "ixgbe_rxtx.h"
+#include "ixgbe_ethdev.h"
+
+#define IXGBE_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_IXGBE_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_IXGBE_CRYPTO_DEBUG
+#define IXGBE_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_IXGBE_PMD), \
+ __func__, __LINE__, ## args)
+
+#define IXGBE_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_IXGBE_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define IXGBE_CRYPTO_LOG_INFO(fmt, args...)
+#define IXGBE_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+
+#define IPSRXIDX_RX_EN 0x00000001
+#define IPSRXIDX_TABLE_IP 0x00000002
+#define IPSRXIDX_TABLE_SPI 0x00000004
+#define IPSRXIDX_TABLE_KEY 0x00000006
+#define IPSRXIDX_WRITE 0x80000000
+#define IPSRXIDX_READ 0x40000000
+#define IPSRXMOD_VALID 0x00000001
+#define IPSRXMOD_PROTO 0x00000004
+#define IPSRXMOD_DECRYPT 0x00000008
+#define IPSRXMOD_IPV6 0x00000010
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+
+#define IPSEC_MAX_RX_IP_COUNT 128
+#define IPSEC_MAX_SA_COUNT 1024
+
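+/*
+ * The Rx IP/SPI/key tables are accessed indirectly through IPSRXIDX:
+ * the index, a table selector and the read or write strobe bit are
+ * composed into the register, and the strobe bit is polled until the
+ * hardware clears it (see the IXGBE_WAIT_RW macro).
+ */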
+
+enum ixgbe_operation {
+ IXGBE_OP_AUTHENTICATED_ENCRYPTION,
+ IXGBE_OP_AUTHENTICATED_DECRYPTION
+};
+
+enum ixgbe_gcm_key {
+ IXGBE_GCM_KEY_128,
+ IXGBE_GCM_KEY_256
+};
+
+/** IXGBE inline crypto queue pair */
+struct ixgbe_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+/** IXGBE inline crypto private session structure */
+struct ixgbe_crypto_session {
+ enum ixgbe_operation op;
+ uint8_t enc;
+ enum rte_crypto_ipsec_direction dir;
+	uint8_t *key;
+ uint32_t salt;
+ uint32_t sa_index;
+ uint32_t spi;
+ struct rte_crypto_ipsec_addr src_ip;
+ struct rte_crypto_ipsec_addr dst_ip;
+
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+struct ixgbe_crypto_rx_ip_table {
+	struct rte_crypto_ipsec_addr ip;
+	uint16_t ref_count;
+};
+
+struct ixgbe_crypto_rx_sa_table {
+	uint32_t spi;
+	uint32_t ip_index;
+	uint32_t key[4];
+	uint32_t salt;
+	uint8_t mode;
+	uint8_t used;
+};
+
+struct ixgbe_crypto_tx_sa_table {
+	uint32_t spi;
+	uint32_t key[4];
+	uint32_t salt;
+	uint8_t used;
+};
+
+
+/** private data structure for each IXGBE inline crypto device */
+struct ixgbe_crypto_private {
+ struct ixgbe_adapter ixgbe_private;
+ unsigned max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned max_nb_sessions; /**< Max number of sessions */
+#define IS_INITIALIZED (1 << 0)
+ uint8_t flags;
+ struct ixgbe_crypto_rx_ip_table rx_ip_table[IPSEC_MAX_RX_IP_COUNT];
+ struct ixgbe_crypto_rx_sa_table rx_sa_table[IPSEC_MAX_SA_COUNT];
+ struct ixgbe_crypto_tx_sa_table tx_sa_table[IPSEC_MAX_SA_COUNT];
+};
+
+
+/** Set and validate inline crypto session parameters */
+extern int
+ixgbe_crypto_set_session_parameters(struct ixgbe_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+extern uint16_t
+ixgbe_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+extern uint16_t
+ixgbe_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+int crypto_ixgbe_add_sa(struct rte_cryptodev *cryptodev,
+		struct ixgbe_crypto_session *sess);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops ixgbe_crypto_pmd_ops;
+
+#endif /* _IXGBE_CRYPTO_PMD_PRIVATE_H_ */
new file mode 100644
@@ -0,0 +1,474 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "../ixgbe/ixgbe_crypto_pmd_private.h"
+
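+/*
+ * Helper for the indexed IPsec table registers: writes the command
+ * composed in the caller's local 'reg' variable to the index register
+ * and busy-polls until the hardware clears the read/write strobe.
+ * Note that it deliberately uses the caller-scope 'hw' and 'reg'.
+ */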
+#define IXGBE_WAIT_RW(__reg, __rw) \
+{ \
+	IXGBE_WRITE_REG(hw, (__reg), reg); \
+	while ((IXGBE_READ_REG(hw, (__reg))) & (__rw)) \
+		; \
+}
+#define IXGBE_WAIT_RREAD  IXGBE_WAIT_RW(IXGBE_IPSRXIDX, IPSRXIDX_READ)
+#define IXGBE_WAIT_RWRITE IXGBE_WAIT_RW(IXGBE_IPSRXIDX, IPSRXIDX_WRITE)
+#define IXGBE_WAIT_TREAD  IXGBE_WAIT_RW(IXGBE_IPSTXIDX, IPSRXIDX_READ)
+#define IXGBE_WAIT_TWRITE IXGBE_WAIT_RW(IXGBE_IPSTXIDX, IPSRXIDX_WRITE)
+
+#define CMP_IP(a, b) \
+ ((a).ipv6[0] == (b).ipv6[0] && (a).ipv6[1] == (b).ipv6[1] && \
+ (a).ipv6[2] == (b).ipv6[2] && (a).ipv6[3] == (b).ipv6[3])
+
+
+static void
+crypto_ixgbe_clear_ipsec_tables(struct rte_cryptodev *cryptodev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(cryptodev->data->dev_private);
+	int i;
+
+	/* clear the Rx IP table */
+	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+		uint32_t reg = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_IP | (i << 3);
+
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+		IXGBE_WAIT_RWRITE;
+	}
+
+	/* clear the Rx SPI and Rx/Tx SA tables */
+	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+		uint32_t index = i << 3;
+		uint32_t reg = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_SPI | index;
+
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+		IXGBE_WAIT_RWRITE;
+		reg = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_KEY | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+ IXGBE_WAIT_RWRITE;
+ reg = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+ IXGBE_WAIT_TWRITE;
+ }
+}
+
+static int
+crypto_ixgbe_enable_ipsec(struct rte_cryptodev *cryptodev)
+{
+	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cryptodev->device);
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(cryptodev->data->dev_private);
+	struct rte_eth_link link;
+	uint8_t port_id;
+	uint32_t reg;
+	int ret;
+	char pci_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	hw->device_id = pci_dev->id.device_id;
+	hw->vendor_id = pci_dev->id.vendor_id;
+	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+	ixgbe_set_mac_type(hw);
+
+	rte_pci_device_name(&pci_dev->addr, pci_name, sizeof(pci_name));
+	ret = rte_eth_dev_get_port_by_name(pci_name, &port_id);
+	if (ret) {
+		IXGBE_CRYPTO_LOG_ERR("Error getting the port id");
+		return -1;
+	}
+	IXGBE_CRYPTO_LOG_DBG("inline ipsec crypto device at %s port id %d",
+			pci_name, port_id);
+
+ /* Halt the data paths */
+ reg = IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+ reg = IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+ /* Wait for Tx path to empty */
+ do {
+ rte_eth_link_get_nowait(port_id, &link);
+ if (link.link_status != ETH_LINK_UP) {
+			/*
+			 * Fix for HSD:4426139
+			 * If the Tx FIFO has data but no link, we can't
+			 * clear the Tx Sec block, so set MAC loopback
+			 * before the block clear.
+			 */
+			struct timespec time = {
+				.tv_sec = 0,
+				.tv_nsec = 1000000 * 3,
+			};
+
+			reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+			reg |= IXGBE_MACC_FLU;
+			IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+			reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+			reg |= IXGBE_HLREG0_LPBK;
+			IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+			nanosleep(&time, NULL);
+ }
+
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+
+ rte_eth_link_get_nowait(port_id, &link);
+ if (link.link_status != ETH_LINK_UP) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg &= ~(IXGBE_MACC_FLU);
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg &= ~(IXGBE_HLREG0_LPBK);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+ }
+ } while (!(reg & IXGBE_SECTXSTAT_SECTX_RDY));
+
+	/* Wait for the Rx path to empty */
+	do {
+		reg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+	} while (!(reg & IXGBE_SECRXSTAT_SECRX_RDY));
+
+	/* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet */
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
+
+	/*
+	 * IFG needs to be set to 3 when we are using security; otherwise
+	 * a Tx hang will occur with heavy traffic.
+	 */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xFFFFFFF0) | 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+	/*
+	 * Enable the Tx crypto engine and restart the Tx data path;
+	 * set the STORE_FORWARD bit for IPsec.
+	 */
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);
+
+	/* Enable the Rx crypto engine and restart the Rx data path */
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+
+	/* Test if crypto was enabled */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
+		IXGBE_CRYPTO_LOG_ERR("Error enabling Tx Crypto");
+		return -1;
+	}
+	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	if (reg != 0) {
+		IXGBE_CRYPTO_LOG_ERR("Error enabling Rx Crypto");
+		return -1;
+	}
+
+ crypto_ixgbe_clear_ipsec_tables(cryptodev);
+ return 0;
+}
+
+
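+/*
+ * Program an SA into the hardware tables. The IPsec engine is enabled
+ * lazily on the first SA. Inbound SAs take a slot in both the Rx IP
+ * table (shared per destination address, reference-counted) and the Rx
+ * SA table; outbound SAs only need a Tx SA table slot. Returns the SA
+ * index used by the datapath, or -1 on failure.
+ */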
+int
+crypto_ixgbe_add_sa(struct rte_cryptodev *cryptodev,
+		struct ixgbe_crypto_session *sess)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(cryptodev->data->dev_private);
+	struct ixgbe_crypto_private *internals = cryptodev->data->dev_private;
+	char pci_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint32_t reg;
+	int sa_index = -1;
+
+	rte_pci_device_name(&(RTE_DEV_TO_PCI(cryptodev->device)->addr),
+			pci_name, sizeof(pci_name));
+
+	if (!(internals->flags & IS_INITIALIZED)) {
+		if (crypto_ixgbe_enable_ipsec(cryptodev) != 0)
+			return -1;
+		internals->flags |= IS_INITIALIZED;
+	}
+
+ if (sess->dir == RTE_CRYPTO_INBOUND) {
+ int i, ip_index = -1;
+
+		/* Find a match in the IP table */
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(internals->rx_ip_table[i].ip, sess->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+		/* If no match, find a free entry in the IP table */
+ if (ip_index < 0) {
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (internals->rx_ip_table[i].ref_count == 0) {
+ ip_index = i;
+ break;
+ }
+ }
+ }
+
+		/* Fail if no match and no free entries */
+		if (ip_index < 0) {
+			IXGBE_CRYPTO_LOG_ERR("%s no free entry left in "
+					"the Rx IP table", pci_name);
+			return -1;
+		}
+
+		/* Find a free entry in the SA table */
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (internals->rx_sa_table[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+		/* Fail if no free entries */
+		if (sa_index < 0) {
+			IXGBE_CRYPTO_LOG_ERR("%s no free entry left in "
+					"the Rx SA table", pci_name);
+			return -1;
+		}
+
+		internals->rx_ip_table[ip_index].ip.ipv6[0] =
+				rte_cpu_to_be_32(sess->dst_ip.ipv6[0]);
+		internals->rx_ip_table[ip_index].ip.ipv6[1] =
+				rte_cpu_to_be_32(sess->dst_ip.ipv6[1]);
+		internals->rx_ip_table[ip_index].ip.ipv6[2] =
+				rte_cpu_to_be_32(sess->dst_ip.ipv6[2]);
+		internals->rx_ip_table[ip_index].ip.ipv6[3] =
+				rte_cpu_to_be_32(sess->dst_ip.ipv6[3]);
+		internals->rx_ip_table[ip_index].ref_count++;
+
+		internals->rx_sa_table[sa_index].spi =
+				rte_cpu_to_be_32(sess->spi);
+		internals->rx_sa_table[sa_index].ip_index = ip_index;
+		internals->rx_sa_table[sa_index].key[3] =
+				rte_cpu_to_be_32(*(uint32_t *)&sess->key[0]);
+		internals->rx_sa_table[sa_index].key[2] =
+				rte_cpu_to_be_32(*(uint32_t *)&sess->key[4]);
+		internals->rx_sa_table[sa_index].key[1] =
+				rte_cpu_to_be_32(*(uint32_t *)&sess->key[8]);
+		internals->rx_sa_table[sa_index].key[0] =
+				rte_cpu_to_be_32(*(uint32_t *)&sess->key[12]);
+		internals->rx_sa_table[sa_index].salt =
+				rte_cpu_to_be_32(sess->salt);
+		internals->rx_sa_table[sa_index].mode = IPSRXMOD_VALID;
+		if (sess->enc)
+			internals->rx_sa_table[sa_index].mode |=
+					(IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+		if (sess->dst_ip.type == IPV6_ADDRESS)
+			internals->rx_sa_table[sa_index].mode |= IPSRXMOD_IPV6;
+		internals->rx_sa_table[sa_index].used = 1;
+
+
+		/* write IP table entry */
+		reg = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_IP | (ip_index << 3);
+		if (internals->rx_ip_table[ip_index].ip.type == IPV4_ADDRESS) {
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+					internals->rx_ip_table[ip_index].ip.ipv4);
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
+					internals->rx_ip_table[ip_index].ip.ipv6[0]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
+					internals->rx_ip_table[ip_index].ip.ipv6[1]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
+					internals->rx_ip_table[ip_index].ip.ipv6[2]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+					internals->rx_ip_table[ip_index].ip.ipv6[3]);
+		}
+		IXGBE_WAIT_RWRITE;
+
+		/* write SPI table entry */
+		reg = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_SPI | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+				internals->rx_sa_table[sa_index].spi);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
+				internals->rx_sa_table[sa_index].ip_index);
+		IXGBE_WAIT_RWRITE;
+
+		/* write key table entry */
+		reg = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_KEY | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
+				internals->rx_sa_table[sa_index].key[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
+				internals->rx_sa_table[sa_index].key[1]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
+				internals->rx_sa_table[sa_index].key[2]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
+				internals->rx_sa_table[sa_index].key[3]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
+				internals->rx_sa_table[sa_index].salt);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
+				internals->rx_sa_table[sa_index].mode);
+		IXGBE_WAIT_RWRITE;
+
+ } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
+ int i;
+
+		/* Find a free entry in the SA table */
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (internals->tx_sa_table[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+		/* Fail if no free entries */
+		if (sa_index < 0) {
+			IXGBE_CRYPTO_LOG_ERR("%s no free entry left in "
+					"the Tx SA table", pci_name);
+			return -1;
+		}
+
+		internals->tx_sa_table[sa_index].spi =
+				rte_cpu_to_be_32(sess->spi);
+		internals->tx_sa_table[sa_index].key[3] =
+				rte_cpu_to_be_32(*(uint32_t *)&sess->key[0]);
+		internals->tx_sa_table[sa_index].key[2] =
+				rte_cpu_to_be_32(*(uint32_t *)&sess->key[4]);
+		internals->tx_sa_table[sa_index].key[1] =
+				rte_cpu_to_be_32(*(uint32_t *)&sess->key[8]);
+		internals->tx_sa_table[sa_index].key[0] =
+				rte_cpu_to_be_32(*(uint32_t *)&sess->key[12]);
+		internals->tx_sa_table[sa_index].salt =
+				rte_cpu_to_be_32(sess->salt);
+
+		/*
+		 * The Rx-side IPSRXIDX_* defines are reused for IPSTXIDX
+		 * here, as the enable and write-strobe bit positions are
+		 * shared between the two index registers.
+		 */
+		reg = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
+				internals->tx_sa_table[sa_index].key[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
+				internals->tx_sa_table[sa_index].key[1]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
+				internals->tx_sa_table[sa_index].key[2]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
+				internals->tx_sa_table[sa_index].key[3]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
+				internals->tx_sa_table[sa_index].salt);
+		IXGBE_WAIT_TWRITE;
+
+ }
+
+ return sa_index;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_ixgbe_crypto_map[] = {
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+#ifdef RTE_NIC_BYPASS
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+#endif
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+
+static int
+crypto_ixgbe_dev_init(__rte_unused struct rte_cryptodev_driver *crypto_drv,
+ struct rte_cryptodev *cryptodev)
+{
+ struct ixgbe_crypto_private *internals;
+ char pci_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ PMD_INIT_FUNC_TRACE();
+
+	rte_pci_device_name(&(RTE_DEV_TO_PCI(cryptodev->device)->addr),
+			pci_name, sizeof(pci_name));
+ IXGBE_CRYPTO_LOG_DBG("Found crypto device at %s", pci_name);
+
+ cryptodev->dev_type = RTE_CRYPTODEV_IXGBE_PMD;
+ cryptodev->dev_ops = &ixgbe_crypto_pmd_ops;
+
+ cryptodev->enqueue_burst = ixgbe_crypto_pmd_enqueue_burst;
+ cryptodev->dequeue_burst = ixgbe_crypto_pmd_dequeue_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+
+	internals = cryptodev->data->dev_private;
+
+	/*
+	 * For secondary processes, we don't initialise any further as the
+	 * primary process has already done this work; re-initialising here
+	 * would clobber the shared SA tables.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		IXGBE_CRYPTO_LOG_DBG(
+			"Device already initialised by primary process");
+		return 0;
+	}
+
+	internals->max_nb_sessions = RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
+	internals->max_nb_qpairs = RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS;
+	internals->flags = 0;
+	memset(internals->rx_ip_table, 0, sizeof(internals->rx_ip_table));
+	memset(internals->rx_sa_table, 0, sizeof(internals->rx_sa_table));
+	memset(internals->tx_sa_table, 0, sizeof(internals->tx_sa_table));
+
+	return 0;
+}
+
+
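+/*
+ * The crypto PMD binds to the same PCI devices as the ixgbe net PMD;
+ * the RTE_PCI_DEVICE_SH entries in the id table are presumably what
+ * allows both drivers to share one device in this patch set.
+ */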
+static struct rte_cryptodev_driver cryptodev_ixgbe_pmd_drv = {
+ .pci_drv = {
+ .id_table = pci_id_ixgbe_crypto_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_cryptodev_pci_probe,
+ .remove = rte_cryptodev_pci_remove,
+ },
+ .cryptodev_init = crypto_ixgbe_dev_init,
+ .dev_private_size = sizeof(struct ixgbe_crypto_private),
+};
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_IXGBE_PMD, cryptodev_ixgbe_pmd_drv.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_IXGBE_PMD, pci_id_ixgbe_crypto_map);
+
new file mode 100644
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
@@ -428,61 +428,61 @@ static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
* The set of PCI devices this driver supports
*/
static const struct rte_pci_id pci_id_ixgbe_map[] = {
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
#ifdef RTE_NIC_BYPASS
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
{ .vendor_id = 0, /* sentinel */ },
};
@@ -491,16 +491,16 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
* The set of PCI devices this driver supports (for 82599 VF)
*/
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
+ { RTE_PCI_DEVICE_SH(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -395,7 +395,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
- uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+ uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+ struct rte_mbuf *mb)
{
uint32_t type_tucmd_mlhl;
uint32_t mss_l4len_idx = 0;
@@ -480,6 +481,14 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
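+	/*
+	 * For inline IPsec, pass the SA index, the encrypt flag and the
+	 * ESP pad length to the hardware through the context descriptor.
+	 */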
+	if (mb->ol_flags & PKT_TX_IPSEC_INLINE_CRYPTO) {
+		seqnum_seed |= (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK &
+				mb->inline_ipsec.sa_idx);
+		type_tucmd_mlhl |= mb->inline_ipsec.enc ?
+				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+				IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+		type_tucmd_mlhl |= (mb->inline_ipsec.pad_len &
+				IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+	}
+
+
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
tx_offload_mask.data[0] & tx_offload.data[0];
@@ -855,7 +864,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload);
+ tx_offload, tx_pkt);
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -872,7 +881,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
}
- olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+	olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
+			(((ol_flags & PKT_TX_IPSEC_INLINE_CRYPTO) != 0) *
+			IXGBE_ADVTXD_POPTS_IPSEC);
m_seg = tx_pkt;
do {
@@ -1450,6 +1460,12 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
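+	/* Translate the inline IPsec status/error bits into ol_flags */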
+ if (rx_status & IXGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_IPSEC_INLINE_CRYPTO;
+ if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
+ pkt_flags |= PKT_RX_IPSEC_INLINE_CRYPTO_AUTH_FAILED;
+ }
+
return pkt_flags;
}
@@ -126,6 +126,7 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
{
__m128i ptype0, ptype1, vtag0, vtag1, csum;
__m128i rearm0, rearm1, rearm2, rearm3;
+ __m128i sterr0, sterr1, sterr2, sterr3, tmp1, tmp2;
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
@@ -172,6 +173,16 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
+	const __m128i ipsec_sterr_msk = _mm_set_epi32(
+		0, IXGBE_RXD_STAT_SECP | IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG,
+		0, 0);
+	const __m128i ipsec_proc_msk = _mm_set_epi32(
+		0, IXGBE_RXD_STAT_SECP, 0, 0);
+	const __m128i ipsec_err_flag = _mm_set_epi32(
+		0, PKT_RX_IPSEC_INLINE_CRYPTO_AUTH_FAILED |
+		PKT_RX_IPSEC_INLINE_CRYPTO, 0, 0);
+	const __m128i ipsec_proc_flag = _mm_set_epi32(
+		0, PKT_RX_IPSEC_INLINE_CRYPTO, 0, 0);
+
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
@@ -234,6 +245,29 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+
+	/*
+	 * Inline IPsec: extract the IPsec flags from the descriptors.
+	 * A status matching SECP|BAD_SIG yields both the processed and
+	 * auth-failed flags; a status matching only SECP yields just the
+	 * processed flag.
+	 */
+	sterr0 = _mm_and_si128(descs[0], ipsec_sterr_msk);
+	sterr1 = _mm_and_si128(descs[1], ipsec_sterr_msk);
+	sterr2 = _mm_and_si128(descs[2], ipsec_sterr_msk);
+	sterr3 = _mm_and_si128(descs[3], ipsec_sterr_msk);
+	tmp1 = _mm_cmpeq_epi32(sterr0, ipsec_sterr_msk);
+	tmp2 = _mm_cmpeq_epi32(sterr0, ipsec_proc_msk);
+	sterr0 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
+			_mm_and_si128(tmp2, ipsec_proc_flag));
+	tmp1 = _mm_cmpeq_epi32(sterr1, ipsec_sterr_msk);
+	tmp2 = _mm_cmpeq_epi32(sterr1, ipsec_proc_msk);
+	sterr1 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
+			_mm_and_si128(tmp2, ipsec_proc_flag));
+	tmp1 = _mm_cmpeq_epi32(sterr2, ipsec_sterr_msk);
+	tmp2 = _mm_cmpeq_epi32(sterr2, ipsec_proc_msk);
+	sterr2 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
+			_mm_and_si128(tmp2, ipsec_proc_flag));
+	tmp1 = _mm_cmpeq_epi32(sterr3, ipsec_sterr_msk);
+	tmp2 = _mm_cmpeq_epi32(sterr3, ipsec_proc_msk);
+	sterr3 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
+			_mm_and_si128(tmp2, ipsec_proc_flag));
+
+ rearm0 = _mm_or_si128(rearm0, sterr0);
+ rearm1 = _mm_or_si128(rearm1, sterr1);
+ rearm2 = _mm_or_si128(rearm2, sterr2);
+ rearm3 = _mm_or_si128(rearm3, sterr3);
+
_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
@@ -72,6 +72,8 @@ extern "C" {
/**< Scheduler Crypto PMD device name */
#define CRYPTODEV_NAME_DPAA2_SEC_PMD cryptodev_dpaa2_sec_pmd
/**< NXP DPAA2 - SEC PMD device name */
+#define CRYPTODEV_NAME_IXGBE_PMD	crypto_ixgbe_ipsec
+/**< IXGBE inline IPsec crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
@@ -82,10 +84,11 @@ enum rte_cryptodev_type {
RTE_CRYPTODEV_SNOW3G_PMD, /**< SNOW 3G PMD */
RTE_CRYPTODEV_KASUMI_PMD, /**< KASUMI PMD */
RTE_CRYPTODEV_ZUC_PMD, /**< ZUC PMD */
	RTE_CRYPTODEV_OPENSSL_PMD,	/**< OpenSSL PMD */
RTE_CRYPTODEV_ARMV8_PMD, /**< ARMv8 crypto PMD */
RTE_CRYPTODEV_SCHEDULER_PMD, /**< Crypto Scheduler PMD */
RTE_CRYPTODEV_DPAA2_SEC_PMD, /**< NXP DPAA2 - SEC PMD */
+ RTE_CRYPTODEV_IXGBE_PMD, /**< IXGBE Inline IPSec PMD */
};
extern const char **rte_cyptodev_names;
@@ -150,6 +150,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -L$(AESNI_MULTI_BUFFER_LIB_PATH)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm -lisal_crypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += -lrte_pmd_openssl -lcrypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_IXGBE_CRYPTO) += -lrte_pmd_ixgbe_crypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += -lrte_pmd_qat -lcrypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -lrte_pmd_snow3g
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g