[dpdk-dev,v4,10/12] net/ixgbe: enable inline ipsec

Message ID 20171014221734.15511-11-akhil.goyal@nxp.com (mailing list archive)
State Superseded, archived
Checks

Context               Check    Description
ci/checkpatch         warning  coding style issues
ci/Intel-compilation  fail     apply patch file failure

Commit Message

Akhil Goyal Oct. 14, 2017, 10:17 p.m. UTC
  From: Radu Nicolau <radu.nicolau@intel.com>

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
 drivers/net/Makefile                   |   2 +-
 drivers/net/ixgbe/Makefile             |   2 +-
 drivers/net/ixgbe/base/ixgbe_osdep.h   |   8 +
 drivers/net/ixgbe/ixgbe_ethdev.c       |  19 +
 drivers/net/ixgbe/ixgbe_ethdev.h       |   6 +-
 drivers/net/ixgbe/ixgbe_flow.c         |  47 +++
 drivers/net/ixgbe/ixgbe_ipsec.c        | 744 +++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ipsec.h        | 147 +++++++
 drivers/net/ixgbe/ixgbe_rxtx.c         |  53 ++-
 drivers/net/ixgbe/ixgbe_rxtx.h         |  11 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c |  50 ++-
 11 files changed, 1079 insertions(+), 10 deletions(-)
 create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.c
 create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.h
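
As context for the review below, here is a minimal, hypothetical usage sketch — not part of the patch, and assuming the rte_security API added earlier in this series — of how an application would create an egress inline-crypto session, matching what ixgbe_crypto_create_session() in this patch expects: an AES-GCM AEAD xform with a 4-byte salt appended to the 16-byte key. The helper name create_egress_inline_session() is made up; rte_eth_dev_get_sec_ctx() and rte_security_session_create() come from the rte_security patches of this series.

static struct rte_security_session *
create_egress_inline_session(uint8_t port_id, struct rte_mempool *sess_pool,
			     uint8_t *key /* 16B AES key + 4B salt appended */)
{
	/* AES-GCM-128 AEAD transform; the driver copies the salt from the
	 * 4 bytes following key.length, see ixgbe_crypto_create_session().
	 */
	struct rte_crypto_sym_xform aead_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = 16 },
			.iv = { .offset = 0, .length = 12 },
			.digest_length = 16,
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 5, /* example SPI */
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		},
		.crypto_xform = &aead_xform,
	};
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	return rte_security_session_create(ctx, &conf, sess_pool);
}

For ingress, the SA is additionally bound to traffic via an rte_flow rule with an IP pattern and an RTE_FLOW_ACTION_TYPE_SECURITY action, handled in ixgbe_flow.c below.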
  

Comments

Aviad Yehezkel Oct. 15, 2017, 12:51 p.m. UTC | #1
On 10/15/2017 1:17 AM, Akhil Goyal wrote:
> From: Radu Nicolau <radu.nicolau@intel.com>
>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> ---
>   drivers/net/Makefile                   |   2 +-
>   drivers/net/ixgbe/Makefile             |   2 +-
>   drivers/net/ixgbe/base/ixgbe_osdep.h   |   8 +
>   drivers/net/ixgbe/ixgbe_ethdev.c       |  19 +
>   drivers/net/ixgbe/ixgbe_ethdev.h       |   6 +-
>   drivers/net/ixgbe/ixgbe_flow.c         |  47 +++
>   drivers/net/ixgbe/ixgbe_ipsec.c        | 744 +++++++++++++++++++++++++++++++++
>   drivers/net/ixgbe/ixgbe_ipsec.h        | 147 +++++++
>   drivers/net/ixgbe/ixgbe_rxtx.c         |  53 ++-
>   drivers/net/ixgbe/ixgbe_rxtx.h         |  11 +-
>   drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c |  50 ++-
>   11 files changed, 1079 insertions(+), 10 deletions(-)
>   create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.c
>   create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.h
>
> diff --git a/drivers/net/Makefile b/drivers/net/Makefile
> index 5d2ad2f..339ff36 100644
> --- a/drivers/net/Makefile
> +++ b/drivers/net/Makefile
> @@ -68,7 +68,7 @@ DEPDIRS-fm10k = $(core-libs) librte_hash
>   DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
>   DEPDIRS-i40e = $(core-libs) librte_hash
>   DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
> -DEPDIRS-ixgbe = $(core-libs) librte_hash
> +DEPDIRS-ixgbe = $(core-libs) librte_hash librte_security
>   DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
>   DEPDIRS-liquidio = $(core-libs)
>   DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
> diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
> index 95c806d..6e963c7 100644
> --- a/drivers/net/ixgbe/Makefile
> +++ b/drivers/net/ixgbe/Makefile
> @@ -118,11 +118,11 @@ SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
>   else
>   SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
>   endif
> -
>   ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
>   SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
>   SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
>   endif
> +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
>   SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
>   SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
>   
> diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
> index 4aab278..b132a0f 100644
> --- a/drivers/net/ixgbe/base/ixgbe_osdep.h
> +++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
> @@ -161,4 +161,12 @@ static inline uint32_t ixgbe_read_addr(volatile void* addr)
>   #define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
>   	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
>   
> +#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms)	\
> +{									\
> +	uint32_t cnt = poll_ms;						\
> +	IXGBE_WRITE_REG(hw, (reg), (val));				\
> +	while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--))	\
> +		rte_delay_ms(1);					\
> +}
> +
>   #endif /* _IXGBE_OS_H_ */
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 14b9c53..fcabd5e 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -61,6 +61,7 @@
>   #include <rte_random.h>
>   #include <rte_dev.h>
>   #include <rte_hash_crc.h>
> +#include <rte_security_driver.h>
>   
>   #include "ixgbe_logs.h"
>   #include "base/ixgbe_api.h"
> @@ -1132,6 +1133,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>   		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
>   	struct ixgbe_bw_conf *bw_conf =
>   		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
> +	struct rte_security_ctx *security_instance;
>   	uint32_t ctrl_ext;
>   	uint16_t csum;
>   	int diag, i;
> @@ -1139,6 +1141,17 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>   	PMD_INIT_FUNC_TRACE();
>   
>   	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
> +	security_instance = rte_malloc("rte_security_instances_ops",
> +				sizeof(struct rte_security_ctx), 0);
> +	if (security_instance == NULL)
> +		return -ENOMEM;
> +	security_instance->state = RTE_SECURITY_INSTANCE_VALID;
> +	security_instance->device = (void *)eth_dev;
> +	security_instance->ops = &ixgbe_security_ops;
> +	security_instance->sess_cnt = 0;
> +
> +	eth_dev->data->security_ctx = security_instance;
> +
>   	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
>   	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
>   	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
> @@ -1169,6 +1182,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>   
>   	rte_eth_copy_pci_info(eth_dev, pci_dev);
>   	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
> +	eth_dev->data->dev_flags |= RTE_ETH_DEV_SECURITY;
>   
>   	/* Vendor and Device ID need to be set before init of shared code */
>   	hw->device_id = pci_dev->id.device_id;
> @@ -1401,6 +1415,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
>   	/* Remove all Traffic Manager configuration */
>   	ixgbe_tm_conf_uninit(eth_dev);
>   
> +	rte_free(eth_dev->data->security_ctx);
> +
>   	return 0;
>   }
>   
> @@ -3695,6 +3711,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>   	    hw->mac.type == ixgbe_mac_X550EM_a)
>   		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
>   
> +	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> +	dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
> +
>   	dev_info->default_rxconf = (struct rte_eth_rxconf) {
>   		.rx_thresh = {
>   			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
> index e28c856..f5b52c4 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.h
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.h
> @@ -38,6 +38,7 @@
>   #include "base/ixgbe_dcb_82599.h"
>   #include "base/ixgbe_dcb_82598.h"
>   #include "ixgbe_bypass.h"
> +#include "ixgbe_ipsec.h"
>   #include <rte_time.h>
>   #include <rte_hash.h>
>   #include <rte_pci.h>
> @@ -486,7 +487,7 @@ struct ixgbe_adapter {
>   	struct ixgbe_filter_info    filter;
>   	struct ixgbe_l2_tn_info     l2_tn;
>   	struct ixgbe_bw_conf        bw_conf;
> -
> +	struct ixgbe_ipsec          ipsec;
>   	bool rx_bulk_alloc_allowed;
>   	bool rx_vec_allowed;
>   	struct rte_timecounter      systime_tc;
> @@ -543,6 +544,9 @@ struct ixgbe_adapter {
>   #define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
>   	(&((struct ixgbe_adapter *)adapter)->tm_conf)
>   
> +#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
> +	(&((struct ixgbe_adapter *)adapter)->ipsec)
> +
>   /*
>    * RX/TX function prototypes
>    */
> diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
> index 904c146..13c8243 100644
> --- a/drivers/net/ixgbe/ixgbe_flow.c
> +++ b/drivers/net/ixgbe/ixgbe_flow.c
> @@ -187,6 +187,9 @@ const struct rte_flow_action *next_no_void_action(
>    * END
>    * other members in mask and spec should set to 0x00.
>    * item->last should be NULL.
> + *
> + * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
> + *
>    */
>   static int
>   cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
> @@ -226,6 +229,41 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
>   		return -rte_errno;
>   	}
>   
> +	/**
> +	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
> +	 */
> +	act = next_no_void_action(actions, NULL);
> +	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
> +		const void *conf = act->conf;
> +		/* check if the next not void item is END */
> +		act = next_no_void_action(actions, act);
> +		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> +			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION,
> +				act, "Not supported action.");
> +			return -rte_errno;
> +		}
> +
> +		/* get the IP pattern*/
> +		item = next_no_void_pattern(pattern, NULL);
> +		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
> +				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
> +			if (item->last ||
> +					item->type == RTE_FLOW_ITEM_TYPE_END) {
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM,
> +					item, "IP pattern missing.");
> +				return -rte_errno;
> +			}
> +			item = next_no_void_pattern(pattern, item);
> +		}
> +
> +		filter->proto = IPPROTO_ESP;
> +		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
> +					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
> +	}
> +
>   	/* the first not void item can be MAC or IPv4 */
>   	item = next_no_void_pattern(pattern, NULL);
>   
> @@ -519,6 +557,10 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
>   	if (ret)
>   		return ret;
>   
> +	/* ESP flow is not really a flow */
> +	if (filter->proto == IPPROTO_ESP)
> +		return 0;
> +
>   	/* Ixgbe doesn't support tcp flags. */
>   	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
>   		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> @@ -2758,6 +2800,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
>   	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
>   	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
>   			actions, &ntuple_filter, error);
> +
> +	/* ESP flow is not really a flow */
> +	if (ntuple_filter.proto == IPPROTO_ESP)
> +		return flow;
> +
>   	if (!ret) {
>   		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
>   		if (!ret) {
> diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
> new file mode 100644
> index 0000000..6ace305
> --- /dev/null
> +++ b/drivers/net/ixgbe/ixgbe_ipsec.c
> @@ -0,0 +1,744 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
> + *   All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Intel Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <rte_ethdev.h>
> +#include <rte_ethdev_pci.h>
> +#include <rte_ip.h>
> +#include <rte_jhash.h>
> +#include <rte_security_driver.h>
> +#include <rte_cryptodev.h>
> +#include <rte_flow.h>
> +
> +#include "base/ixgbe_type.h"
> +#include "base/ixgbe_api.h"
> +#include "ixgbe_ethdev.h"
> +#include "ixgbe_ipsec.h"
> +
> +#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS  5
> +
> +#define IXGBE_WAIT_RREAD \
> +	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
> +	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
> +#define IXGBE_WAIT_RWRITE \
> +	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
> +	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
> +#define IXGBE_WAIT_TREAD \
> +	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
> +	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
> +#define IXGBE_WAIT_TWRITE \
> +	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
> +	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
> +
> +#define CMP_IP(a, b) (\
> +	(a).ipv6[0] == (b).ipv6[0] && \
> +	(a).ipv6[1] == (b).ipv6[1] && \
> +	(a).ipv6[2] == (b).ipv6[2] && \
> +	(a).ipv6[3] == (b).ipv6[3])
> +
> +
> +static void
> +ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
> +{
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	int i = 0;
> +
> +	/* clear Rx IP table*/
> +	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
> +		uint16_t index = i << 3;
> +		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
> +		IXGBE_WAIT_RWRITE;
> +	}
> +
> +	/* clear Rx SPI and Rx/Tx SA tables*/
> +	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +		uint32_t index = i << 3;
> +		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
> +		IXGBE_WAIT_RWRITE;
> +		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
> +		IXGBE_WAIT_RWRITE;
> +		reg_val = IPSRXIDX_WRITE | index;
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
> +		IXGBE_WAIT_TWRITE;
> +	}
> +}
> +
> +static int
> +ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
> +{
> +	struct rte_eth_dev *dev = ic_session->dev;
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
> +			dev->data->dev_private);
> +	uint32_t reg_val;
> +	int sa_index = -1;
> +
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
> +		int i, ip_index = -1;
> +
> +		/* Find a match in the IP table*/
> +		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
> +			if (CMP_IP(priv->rx_ip_tbl[i].ip,
> +				   ic_session->dst_ip)) {
> +				ip_index = i;
> +				break;
> +			}
> +		}
> +		/* If no match, find a free entry in the IP table*/
> +		if (ip_index < 0) {
> +			for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
> +				if (priv->rx_ip_tbl[i].ref_count == 0) {
> +					ip_index = i;
> +					break;
> +				}
> +			}
> +		}
> +
> +		/* Fail if no match and no free entries*/
> +		if (ip_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "No free entry left in the Rx IP table\n");
> +			return -1;
> +		}
> +
> +		/* Find a free entry in the SA table*/
> +		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +			if (priv->rx_sa_tbl[i].used == 0) {
> +				sa_index = i;
> +				break;
> +			}
> +		}
> +		/* Fail if no free entries*/
> +		if (sa_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "No free entry left in the Rx SA table\n");
> +			return -1;
> +		}
> +
> +		priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
> +				ic_session->dst_ip.ipv6[0];
> +		priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
> +				ic_session->dst_ip.ipv6[1];
> +		priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
> +				ic_session->dst_ip.ipv6[2];
> +		priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
> +				ic_session->dst_ip.ipv6[3];
> +		priv->rx_ip_tbl[ip_index].ref_count++;
> +
> +		priv->rx_sa_tbl[sa_index].spi =
> +			rte_cpu_to_be_32(ic_session->spi);
> +		priv->rx_sa_tbl[sa_index].ip_index = ip_index;
> +		priv->rx_sa_tbl[sa_index].key[3] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
> +		priv->rx_sa_tbl[sa_index].key[2] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
> +		priv->rx_sa_tbl[sa_index].key[1] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
> +		priv->rx_sa_tbl[sa_index].key[0] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
> +		priv->rx_sa_tbl[sa_index].salt =
> +			rte_cpu_to_be_32(ic_session->salt);
> +		priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
> +		if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
> +			priv->rx_sa_tbl[sa_index].mode |=
> +					(IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
> +		if (ic_session->dst_ip.type == IPv6)
> +			priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
> +		priv->rx_sa_tbl[sa_index].used = 1;
> +
> +		/* write IP table entry*/
> +		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
> +				IPSRXIDX_TABLE_IP | (ip_index << 3);
> +		if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
> +					priv->rx_ip_tbl[ip_index].ip.ipv4);
> +		} else {
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
> +					priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
> +					priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
> +					priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
> +					priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
> +		}
> +		IXGBE_WAIT_RWRITE;
> +
> +		/* write SPI table entry*/
> +		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
> +				IPSRXIDX_TABLE_SPI | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
> +				priv->rx_sa_tbl[sa_index].spi);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
> +				priv->rx_sa_tbl[sa_index].ip_index);
> +		IXGBE_WAIT_RWRITE;
> +
> +		/* write Key table entry*/
> +		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
> +				IPSRXIDX_TABLE_KEY | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
> +				priv->rx_sa_tbl[sa_index].key[0]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
> +				priv->rx_sa_tbl[sa_index].key[1]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
> +				priv->rx_sa_tbl[sa_index].key[2]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
> +				priv->rx_sa_tbl[sa_index].key[3]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
> +				priv->rx_sa_tbl[sa_index].salt);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
> +				priv->rx_sa_tbl[sa_index].mode);
> +		IXGBE_WAIT_RWRITE;
> +
> +	} else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
> +		int i;
> +
> +		/* Find a free entry in the SA table*/
> +		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +			if (priv->tx_sa_tbl[i].used == 0) {
> +				sa_index = i;
> +				break;
> +			}
> +		}
> +		/* Fail if no free entries*/
> +		if (sa_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "No free entry left in the Tx SA table\n");
> +			return -1;
> +		}
> +
> +		priv->tx_sa_tbl[sa_index].spi =
> +			rte_cpu_to_be_32(ic_session->spi);
> +		priv->tx_sa_tbl[sa_index].key[3] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
> +		priv->tx_sa_tbl[sa_index].key[2] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
> +		priv->tx_sa_tbl[sa_index].key[1] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
> +		priv->tx_sa_tbl[sa_index].key[0] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
> +		priv->tx_sa_tbl[sa_index].salt =
> +			rte_cpu_to_be_32(ic_session->salt);
> +
> +		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
> +				priv->tx_sa_tbl[sa_index].key[0]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
> +				priv->tx_sa_tbl[sa_index].key[1]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
> +				priv->tx_sa_tbl[sa_index].key[2]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
> +				priv->tx_sa_tbl[sa_index].key[3]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
> +				priv->tx_sa_tbl[sa_index].salt);
> +		IXGBE_WAIT_TWRITE;
> +
> +		priv->tx_sa_tbl[sa_index].used = 1;
> +		ic_session->sa_index = sa_index;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
> +		       struct ixgbe_crypto_session *ic_session)
> +{
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ixgbe_ipsec *priv =
> +			IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
> +	uint32_t reg_val;
> +	int sa_index = -1;
> +
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
> +		int i, ip_index = -1;
> +
> +		/* Find a match in the IP table*/
> +		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
> +			if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
> +				ip_index = i;
> +				break;
> +			}
> +		}
> +
> +		/* Fail if no match*/
> +		if (ip_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "Entry not found in the Rx IP table\n");
> +			return -1;
> +		}
> +
> +		/* Find a match in the SA table */
> +		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +			if (priv->rx_sa_tbl[i].spi ==
> +				  rte_cpu_to_be_32(ic_session->spi)) {
> +				sa_index = i;
> +				break;
> +			}
> +		}
> +		/* Fail if no match*/
> +		if (sa_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "Entry not found in the Rx SA table\n");
> +			return -1;
> +		}
> +
> +		/* Disable and clear Rx SPI and key table entries */
> +		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
> +		IXGBE_WAIT_RWRITE;
> +		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
> +		IXGBE_WAIT_RWRITE;
> +		priv->rx_sa_tbl[sa_index].used = 0;
> +
> +		/* If last used then clear the IP table entry*/
> +		priv->rx_ip_tbl[ip_index].ref_count--;
> +		if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
> +			reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
> +					(ip_index << 3);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
> +		}
> +	} else { /* session->dir == RTE_CRYPTO_OUTBOUND */
> +		int i;
> +
> +		/* Find a match in the SA table*/
> +		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +			if (priv->tx_sa_tbl[i].spi ==
> +				    rte_cpu_to_be_32(ic_session->spi)) {
> +				sa_index = i;
> +				break;
> +			}
> +		}
> +		/* Fail if no match */
> +		if (sa_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "Entry not found in the Tx SA table\n");
> +			return -1;
> +		}
> +		reg_val = IPSRXIDX_WRITE | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
> +		IXGBE_WAIT_TWRITE;
> +
> +		priv->tx_sa_tbl[sa_index].used = 0;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ixgbe_crypto_create_session(void *device,
> +		struct rte_security_session_conf *conf,
> +		struct rte_security_session *session,
> +		struct rte_mempool *mempool)
> +{
> +	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
> +	struct ixgbe_crypto_session *ic_session = NULL;
> +	struct rte_crypto_aead_xform *aead_xform;
> +	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
> +
> +	if (rte_mempool_get(mempool, (void **)&ic_session)) {
> +		PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
> +		return -ENOMEM;
> +	}
> +
> +	if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
> +			conf->crypto_xform->aead.algo !=
> +					RTE_CRYPTO_AEAD_AES_GCM) {
> +		PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
> +		return -ENOTSUP;
> +	}
> +	aead_xform = &conf->crypto_xform->aead;
> +
> +	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
> +		if (dev_conf->rxmode.enable_sec) {
> +			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
> +		} else {
> +			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
> +			return -ENOTSUP;
> +		}
> +	} else {
> +		if (dev_conf->txmode.enable_sec) {
> +			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
> +		} else {
> +			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
> +			return -ENOTSUP;
> +		}
> +	}
> +
> +	ic_session->key = aead_xform->key.data;
> +	memcpy(&ic_session->salt,
> +	       &aead_xform->key.data[aead_xform->key.length], 4);
> +	ic_session->spi = conf->ipsec.spi;
> +	ic_session->dev = eth_dev;
> +
> +	set_sec_session_private_data(session, ic_session);
> +
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
> +		if (ixgbe_crypto_add_sa(ic_session)) {
> +			PMD_DRV_LOG(ERR, "Failed to add SA\n");
> +			return -EPERM;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ixgbe_crypto_remove_session(void *device,
> +		struct rte_security_session *session)
> +{
> +	struct rte_eth_dev *eth_dev = device;
> +	struct ixgbe_crypto_session *ic_session =
> +		(struct ixgbe_crypto_session *)
> +		get_sec_session_private_data(session);
> +	struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
> +
> +	if (eth_dev != ic_session->dev) {
> +		PMD_DRV_LOG(ERR, "Session not bound to this device\n");
> +		return -ENODEV;
> +	}
> +
> +	if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
> +		PMD_DRV_LOG(ERR, "Failed to remove session\n");
> +		return -EFAULT;
> +	}
> +
> +	rte_mempool_put(mempool, (void *)ic_session);
> +
> +	return 0;
> +}
> +
> +static int
> +ixgbe_crypto_update_mb(void *device __rte_unused,
> +		struct rte_security_session *session,
> +		       struct rte_mbuf *m, void *params __rte_unused)
> +{
> +	struct ixgbe_crypto_session *ic_session =
> +			get_sec_session_private_data(session);
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
> +		struct ixgbe_crypto_tx_desc_md *mdata =
> +			(struct ixgbe_crypto_tx_desc_md *)&m->udata64;
> +		mdata->enc = 1;
> +		mdata->sa_idx = ic_session->sa_index;
> +		mdata->pad_len = *rte_pktmbuf_mtod_offset(m,
> +			uint8_t *, rte_pktmbuf_pkt_len(m) - 18) + 18;
> +	}
> +	return 0;
> +}
> +
> +struct rte_cryptodev_capabilities aes_gmac_crypto_capabilities[] = {
> +	{	/* AES GMAC (128-bit) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{
> +		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
> +		}, }
> +	},
> +};
> +
> +struct rte_cryptodev_capabilities aes_gcm_gmac_crypto_capabilities[] = {
> +	{	/* AES GMAC (128-bit) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{	/* AES GCM (128-bit) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +			{.aead = {
> +				.algo = RTE_CRYPTO_AEAD_AES_GCM,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 8,
> +					.max = 16,
> +					.increment = 4
> +				},
> +				.aad_size = {
> +					.min = 0,
> +					.max = 65535,
> +					.increment = 1
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{
> +		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
> +		}, }
> +	},
> +};
> +
> +static const struct rte_security_capability ixgbe_security_capabilities[] = {
> +	{ /* IPsec Inline Crypto ESP Transport Egress */
> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +		.ipsec = {
> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
> +			.options = { 0 }
> +		},
> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
> +		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
> +	},
> +	{ /* IPsec Inline Crypto ESP Transport Ingress */
> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +		.ipsec = {
> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
> +			.options = { 0 }
> +		},
> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
> +		.ol_flags = 0
> +	},
> +	{ /* IPsec Inline Crypto ESP Tunnel Egress */
> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +		.ipsec = {
> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
> +			.options = { 0 }
> +		},
> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
> +		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
> +	},
> +	{ /* IPsec Inline Crypto ESP Tunnel Ingress */
> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +		.ipsec = {
> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
> +			.options = { 0 }
> +		},
> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
> +		.ol_flags = 0
> +	},
> +	{
> +		.action = RTE_SECURITY_ACTION_TYPE_NONE
> +	}
> +};
> +
> +static const struct rte_security_capability *
> +ixgbe_crypto_capabilities_get(void *device __rte_unused)
> +{
> +	return ixgbe_security_capabilities;
> +}
> +
> +
> +int
> +ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
> +{
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	uint32_t reg;
> +
> +	/* sanity checks */
> +	if (dev->data->dev_conf.rxmode.enable_lro) {
> +		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
> +		return -1;
> +	}
> +	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
> +		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
> +		return -1;
> +	}
> +
> +
> +	/* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/
> +	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
> +
> +	/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
> +	 * hang will occur with heavy traffic.
> +	 */
> +	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
> +	reg = (reg & 0xFFFFFFF0) | 0x3;
> +	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
> +
> +	reg  = IXGBE_READ_REG(hw, IXGBE_HLREG0);
> +	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
> +	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
> +
> +	if (dev->data->dev_conf.rxmode.enable_sec) {
> +		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
> +		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
> +		if (reg != 0) {
> +			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
> +			return -1;
> +		}
> +	}
> +	if (dev->data->dev_conf.txmode.enable_sec) {
> +		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
> +				IXGBE_SECTXCTRL_STORE_FORWARD);
> +		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
> +		if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
> +			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
> +			return -1;
> +		}
> +	}
> +
> +	ixgbe_crypto_clear_ipsec_tables(dev);
> +
> +	return 0;
> +}
> +
> +int
> +ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
> +				      const void *ip_spec,
> +				      uint8_t is_ipv6)
> +{
> +	struct ixgbe_crypto_session *ic_session
> +		= get_sec_session_private_data(sess);
> +
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
> +		if (is_ipv6) {
> +			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
> +			ic_session->src_ip.type = IPv6;
> +			ic_session->dst_ip.type = IPv6;
> +			rte_memcpy(ic_session->src_ip.ipv6,
> +				   ipv6->hdr.src_addr, 16);
> +			rte_memcpy(ic_session->dst_ip.ipv6,
> +				   ipv6->hdr.dst_addr, 16);
> +		} else {
> +			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
> +			ic_session->src_ip.type = IPv4;
> +			ic_session->dst_ip.type = IPv4;
> +			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
> +			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
> +		}
> +		return ixgbe_crypto_add_sa(ic_session);
> +	}
> +
> +	return 0;
> +}
> +
> +
> +struct rte_security_ops ixgbe_security_ops = {
> +	.session_create = ixgbe_crypto_create_session,
> +	.session_update = NULL,
> +	.session_stats_get = NULL,
> +	.session_destroy = ixgbe_crypto_remove_session,
> +
> +	.set_pkt_metadata = ixgbe_crypto_update_mb,
> +
> +	.capabilities_get = ixgbe_crypto_capabilities_get
> +};
> diff --git a/drivers/net/ixgbe/ixgbe_ipsec.h b/drivers/net/ixgbe/ixgbe_ipsec.h
> new file mode 100644
> index 0000000..9f06235
> --- /dev/null
> +++ b/drivers/net/ixgbe/ixgbe_ipsec.h
> @@ -0,0 +1,147 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
> + *   All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Intel Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#ifndef IXGBE_IPSEC_H_
> +#define IXGBE_IPSEC_H_
> +
> +#include <rte_security.h>
> +
> +#define IPSRXIDX_RX_EN                                    0x00000001
> +#define IPSRXIDX_TABLE_IP                                 0x00000002
> +#define IPSRXIDX_TABLE_SPI                                0x00000004
> +#define IPSRXIDX_TABLE_KEY                                0x00000006
> +#define IPSRXIDX_WRITE                                    0x80000000
> +#define IPSRXIDX_READ                                     0x40000000
> +#define IPSRXMOD_VALID                                    0x00000001
> +#define IPSRXMOD_PROTO                                    0x00000004
> +#define IPSRXMOD_DECRYPT                                  0x00000008
> +#define IPSRXMOD_IPV6                                     0x00000010
> +#define IXGBE_ADVTXD_POPTS_IPSEC                          0x00000400
> +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP                 0x00002000
> +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN               0x00004000
> +#define IXGBE_RXDADV_IPSEC_STATUS_SECP                    0x00020000
> +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK                 0x18000000
> +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL         0x08000000
> +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH           0x10000000
> +#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED    0x18000000
> +
> +#define IPSEC_MAX_RX_IP_COUNT           128
> +#define IPSEC_MAX_SA_COUNT              1024
> +
> +enum ixgbe_operation {
> +	IXGBE_OP_AUTHENTICATED_ENCRYPTION,
> +	IXGBE_OP_AUTHENTICATED_DECRYPTION
> +};
> +
> +enum ixgbe_gcm_key {
> +	IXGBE_GCM_KEY_128,
> +	IXGBE_GCM_KEY_256
> +};
> +
> +/**
> + * Generic IP address structure
> + * TODO: Find a better location for this, possibly rte_net.h.
> + **/
> +struct ipaddr {
> +	enum ipaddr_type {
> +		IPv4,
> +		IPv6
> +	} type;
> +	/**< IP Address Type - IPv4/IPv6 */
> +
> +	union {
> +		uint32_t ipv4;
> +		uint32_t ipv6[4];
> +	};
> +};
> +
> +/** inline crypto private session structure */
> +struct ixgbe_crypto_session {
> +	enum ixgbe_operation op;
> +	uint8_t *key;
> +	uint32_t salt;
> +	uint32_t sa_index;
> +	uint32_t spi;
> +	struct ipaddr src_ip;
> +	struct ipaddr dst_ip;
> +	struct rte_eth_dev *dev;
> +} __rte_cache_aligned;
> +
> +struct ixgbe_crypto_rx_ip_table {
> +	struct ipaddr ip;
> +	uint16_t ref_count;
> +};
> +struct ixgbe_crypto_rx_sa_table {
> +	uint32_t spi;
> +	uint32_t ip_index;
> +	uint32_t key[4];
> +	uint32_t salt;
> +	uint8_t  mode;
> +	uint8_t  used;
> +};
> +
> +struct ixgbe_crypto_tx_sa_table {
> +	uint32_t spi;
> +	uint32_t key[4];
> +	uint32_t salt;
> +	uint8_t  used;
> +};
> +
> +struct ixgbe_crypto_tx_desc_md {
> +	union {
> +		uint64_t data;
> +		struct {
> +			  uint32_t sa_idx;
> +			  uint8_t pad_len;
> +			  uint8_t enc;
> +		};
> +	};
> +};
> +
> +struct ixgbe_ipsec {
> +	struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
> +	struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
> +	struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
> +};
> +
> +extern struct rte_security_ops ixgbe_security_ops;
> +
> +
> +int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
> +int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
> +					  const void *ip_spec,
> +					  uint8_t is_ipv6);
> +
> +
> +
> +#endif /*IXGBE_IPSEC_H_*/
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
> index 0038dfb..279e3fa 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -93,6 +93,7 @@
>   		PKT_TX_TCP_SEG |		 \
>   		PKT_TX_MACSEC |			 \
>   		PKT_TX_OUTER_IP_CKSUM |		 \
> +		PKT_TX_SEC_OFFLOAD |	 \
>   		IXGBE_TX_IEEE1588_TMST)
>   
>   #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
> @@ -395,7 +396,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>   static inline void
>   ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
>   		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
> -		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
> +		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
> +		struct rte_mbuf *mb)
>   {
>   	uint32_t type_tucmd_mlhl;
>   	uint32_t mss_l4len_idx = 0;
> @@ -479,6 +481,18 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
>   		seqnum_seed |= tx_offload.l2_len
>   			       << IXGBE_ADVTXD_TUNNEL_LEN;
>   	}
> +	if (mb->ol_flags & PKT_TX_SEC_OFFLOAD) {
> +		struct ixgbe_crypto_tx_desc_md *mdata =
> +				(struct ixgbe_crypto_tx_desc_md *)
> +				&mb->udata64;
> +		seqnum_seed |=
> +			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
> +		type_tucmd_mlhl |= mdata->enc ?
> +				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
> +				IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
> +		type_tucmd_mlhl |=
> +			(mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
> +	}
>   
>   	txq->ctx_cache[ctx_idx].flags = ol_flags;
>   	txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
> @@ -657,6 +671,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>   	uint32_t ctx = 0;
>   	uint32_t new_ctx;
>   	union ixgbe_tx_offload tx_offload;
> +	uint8_t use_ipsec;
>   
>   	tx_offload.data[0] = 0;
>   	tx_offload.data[1] = 0;
> @@ -684,6 +699,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>   		 * are needed for offload functionality.
>   		 */
>   		ol_flags = tx_pkt->ol_flags;
> +		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
>   
>   		/* If hardware offload required */
>   		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
> @@ -695,6 +711,13 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>   			tx_offload.tso_segsz = tx_pkt->tso_segsz;
>   			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
>   			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
> +			if (use_ipsec) {
> +				struct ixgbe_crypto_tx_desc_md *ipsec_mdata =
> +					(struct ixgbe_crypto_tx_desc_md *)
> +							&tx_pkt->udata64;
> +				tx_offload.sa_idx = ipsec_mdata->sa_idx;
> +				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
> +			}
>   
>   			/* If new context need be built or reuse the exist ctx. */
>   			ctx = what_advctx_update(txq, tx_ol_req,
> @@ -855,7 +878,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>   				}
>   
>   				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
> -					tx_offload);
> +					tx_offload, tx_pkt);
>   
>   				txe->last_id = tx_last;
>   				tx_id = txe->next_id;
> @@ -873,6 +896,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>   		}
>   
>   		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
> +		if (use_ipsec)
> +			olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
>   
>   		m_seg = tx_pkt;
>   		do {
> @@ -1447,6 +1472,12 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
>   		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
>   	}
>   
> +	if (rx_status & IXGBE_RXD_STAT_SECP) {
> +		pkt_flags |= PKT_RX_SEC_OFFLOAD;
> +		if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
> +			pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
> +	}
> +
>   	return pkt_flags;
>   }
>   
> @@ -2364,8 +2395,9 @@ void __attribute__((cold))
>   ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
>   {
>   	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
> -	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
> -			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
> +	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
> +			(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) &&
> +			!(dev->data->dev_conf.txmode.enable_sec)) {
>   		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
>   		dev->tx_pkt_prepare = NULL;
>   #ifdef RTE_IXGBE_INC_VECTOR
> @@ -2535,6 +2567,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
>   	txq->txq_flags = tx_conf->txq_flags;
>   	txq->ops = &def_txq_ops;
>   	txq->tx_deferred_start = tx_conf->tx_deferred_start;
> +	txq->using_ipsec = dev->data->dev_conf.txmode.enable_sec;
>   
>   	/*
>   	 * Modification to set VFTDT for virtual function if vf is detected
> @@ -4519,6 +4552,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
>   		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
>   
>   		rxq->rx_using_sse = rx_using_sse;
> +		rxq->using_ipsec = dev->data->dev_conf.rxmode.enable_sec;
>   	}
>   }
>   
> @@ -5006,6 +5040,17 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
>   			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
>   		ixgbe_setup_loopback_link_82599(hw);
>   
> +	if (dev->data->dev_conf.rxmode.enable_sec ||
> +			dev->data->dev_conf.txmode.enable_sec) {
> +		ret = ixgbe_crypto_enable_ipsec(dev);
> +		if (ret != 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "ixgbe_crypto_enable_ipsec fails with %d.",
> +				    ret);
> +			return ret;
> +		}
> +	}
> +
>   	return 0;
>   }
>   
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
> index 81c527f..4017831 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> @@ -138,8 +138,10 @@ struct ixgbe_rx_queue {
>   	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
>   	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
>   	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
> -	uint16_t            rx_using_sse;
> +	uint8_t            rx_using_sse;
>   	/**< indicates that vector RX is in use */
> +	uint8_t            using_ipsec;
> +	/**< indicates that IPsec RX feature is in use */
>   #ifdef RTE_IXGBE_INC_VECTOR
>   	uint16_t            rxrearm_nb;     /**< number of remaining to be re-armed */
>   	uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
> @@ -183,6 +185,10 @@ union ixgbe_tx_offload {
>   		/* fields for TX offloading of tunnels */
>   		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
>   		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
> +
> +		/* inline ipsec related*/
> +		uint64_t sa_idx:8;	/**< TX SA database entry index */
> +		uint64_t sec_pad_len:4;	/**< padding length */
>   	};
>   };
>   
> @@ -247,6 +253,9 @@ struct ixgbe_tx_queue {
>   	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
>   	const struct ixgbe_txq_ops *ops;       /**< txq ops */
>   	uint8_t             tx_deferred_start; /**< not in global dev start. */
> +	uint8_t		    using_ipsec;
> +	/**< indicates that IPsec TX feature is in use */
> +
>   };
>   
>   struct ixgbe_txq_ops {
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
> index e704a7f..c9b1e2e 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
> @@ -124,10 +124,12 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
>   
>   static inline void
>   desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
> -	struct rte_mbuf **rx_pkts)
> +	struct rte_mbuf **rx_pkts, uint8_t use_ipsec)
>   {
>   	__m128i ptype0, ptype1, vtag0, vtag1, csum;
>   	__m128i rearm0, rearm1, rearm2, rearm3;
> +	__m128i sterr0, sterr1, sterr2, sterr3;
> +	__m128i tmp1, tmp2, tmp3, tmp4;
>   
>   	/* mask everything except rss type */
>   	const __m128i rsstype_msk = _mm_set_epi16(
> @@ -174,6 +176,41 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
>   		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
>   		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
>   
> +	const __m128i ipsec_sterr_msk = _mm_set_epi32(
> +		0, IXGBE_RXDADV_IPSEC_STATUS_SECP |
> +			IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED,
> +		0, 0);
> +	const __m128i ipsec_proc_msk  = _mm_set_epi32(
> +		0, IXGBE_RXDADV_IPSEC_STATUS_SECP, 0, 0);
> +	const __m128i ipsec_err_flag  = _mm_set_epi32(
> +		0, PKT_RX_SEC_OFFLOAD_FAILED | PKT_RX_SEC_OFFLOAD,
> +		0, 0);
> +	const __m128i ipsec_proc_flag = _mm_set_epi32(
> +		0, PKT_RX_SEC_OFFLOAD, 0, 0);
> +
> +	if (use_ipsec) {
> +		sterr0 = _mm_and_si128(descs[0], ipsec_sterr_msk);
> +		sterr1 = _mm_and_si128(descs[1], ipsec_sterr_msk);
> +		sterr2 = _mm_and_si128(descs[2], ipsec_sterr_msk);
> +		sterr3 = _mm_and_si128(descs[3], ipsec_sterr_msk);
> +		tmp1 = _mm_cmpeq_epi32(sterr0, ipsec_sterr_msk);
> +		tmp2 = _mm_cmpeq_epi32(sterr0, ipsec_proc_msk);
> +		tmp3 = _mm_cmpeq_epi32(sterr1, ipsec_sterr_msk);
> +		tmp4 = _mm_cmpeq_epi32(sterr1, ipsec_proc_msk);
> +		sterr0 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
> +					_mm_and_si128(tmp2, ipsec_proc_flag));
> +		sterr1 = _mm_or_si128(_mm_and_si128(tmp3, ipsec_err_flag),
> +					_mm_and_si128(tmp4, ipsec_proc_flag));
> +		tmp1 = _mm_cmpeq_epi32(sterr2, ipsec_sterr_msk);
> +		tmp2 = _mm_cmpeq_epi32(sterr2, ipsec_proc_msk);
> +		tmp3 = _mm_cmpeq_epi32(sterr3, ipsec_sterr_msk);
> +		tmp4 = _mm_cmpeq_epi32(sterr3, ipsec_proc_msk);
> +		sterr2 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
> +					_mm_and_si128(tmp2, ipsec_proc_flag));
> +		sterr3 = _mm_or_si128(_mm_and_si128(tmp3, ipsec_err_flag),
> +					_mm_and_si128(tmp4, ipsec_proc_flag));
> +	}
> +
>   	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
>   	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
>   	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
> @@ -221,6 +258,13 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
>   	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
>   	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
>   
> +	if (use_ipsec) {
> +		rearm0 = _mm_or_si128(rearm0, sterr0);
> +		rearm1 = _mm_or_si128(rearm1, sterr1);
> +		rearm2 = _mm_or_si128(rearm2, sterr2);
> +		rearm3 = _mm_or_si128(rearm3, sterr3);
> +	}
> +
>   	/* write the rearm data and the olflags in one write */
>   	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
>   			offsetof(struct rte_mbuf, rearm_data) + 8);
> @@ -310,6 +354,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>   	volatile union ixgbe_adv_rx_desc *rxdp;
>   	struct ixgbe_rx_entry *sw_ring;
>   	uint16_t nb_pkts_recd;
> +	uint8_t use_ipsec = rxq->using_ipsec;
>   	int pos;
>   	uint64_t var;
>   	__m128i shuf_msk;
> @@ -471,7 +516,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>   		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
>   
>   		/* set ol_flags with vlan packet type */
> -		desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
> +		desc_to_olflags_v(descs, mbuf_init, vlan_flags,
> +				  &rx_pkts[pos], use_ipsec);
>   
>   		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
>   		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);

Tested-by: Aviad Yehezkel <aviadye@mellanox.com>
  
Thomas Monjalon Oct. 16, 2017, 10:41 a.m. UTC | #2
15/10/2017 14:51, Aviad Yehezkel:
> 
> On 10/15/2017 1:17 AM, Akhil Goyal wrote:
> > From: Radu Nicolau <radu.nicolau@intel.com>
> >
> > Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> > Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> > ---
> >   drivers/net/Makefile                   |   2 +-
> >   drivers/net/ixgbe/Makefile             |   2 +-
> >   drivers/net/ixgbe/base/ixgbe_osdep.h   |   8 +
> >   drivers/net/ixgbe/ixgbe_ethdev.c       |  19 +
> >   drivers/net/ixgbe/ixgbe_ethdev.h       |   6 +-
> >   drivers/net/ixgbe/ixgbe_flow.c         |  47 +++
> >   drivers/net/ixgbe/ixgbe_ipsec.c        | 744 +++++++++++++++++++++++++++++++++
> >   drivers/net/ixgbe/ixgbe_ipsec.h        | 147 +++++++
> >   drivers/net/ixgbe/ixgbe_rxtx.c         |  53 ++-
> >   drivers/net/ixgbe/ixgbe_rxtx.h         |  11 +-
> >   drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c |  50 ++-
> >   11 files changed, 1079 insertions(+), 10 deletions(-)
> >   create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.c
> >   create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.h
> >
[all code lines cut]

Please Aviad, remove the useless lines when replying.
It is really annoying to scroll the whole patch to find where you replied.

> Tested-by: Aviad Yehezkel <aviadye@mellanox.com>

Really? You have tested the ixgbe driver?

When providing a test acknowledgement, it is more valuable to provide
a brief test report:
	- which hardware
	- which use case
	- results
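
For example (illustrative only):

	Tested-by: <name> <email>
	- hardware: Intel 82599ES (ixgbe), back-to-back link
	- use case: examples/ipsec-secgw, inline crypto, ESP transport,
	  AES-GCM-128, bidirectional traffic
	- results: packets encrypted/decrypted by the NIC as expected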
  
Ananyev, Konstantin Oct. 18, 2017, 9:29 p.m. UTC | #3
Hi Radu,
Few comments from me below.
Konstantin

> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Saturday, October 14, 2017 11:18 PM
> To: dev@dpdk.org
> Cc: Doherty, Declan <declan.doherty@intel.com>; De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; hemant.agrawal@nxp.com;
> Nicolau, Radu <radu.nicolau@intel.com>; borisp@mellanox.com; aviadye@mellanox.com; thomas@monjalon.net;
> sandeep.malik@nxp.com; jerin.jacob@caviumnetworks.com; Mcnamara, John <john.mcnamara@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; shahafs@mellanox.com; olivier.matz@6wind.com
> Subject: [PATCH v4 10/12] net/ixgbe: enable inline ipsec
> 
> From: Radu Nicolau <radu.nicolau@intel.com>
> 
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> ---
>  drivers/net/Makefile                   |   2 +-
>  drivers/net/ixgbe/Makefile             |   2 +-
>  drivers/net/ixgbe/base/ixgbe_osdep.h   |   8 +
>  drivers/net/ixgbe/ixgbe_ethdev.c       |  19 +
>  drivers/net/ixgbe/ixgbe_ethdev.h       |   6 +-
>  drivers/net/ixgbe/ixgbe_flow.c         |  47 +++
>  drivers/net/ixgbe/ixgbe_ipsec.c        | 744 +++++++++++++++++++++++++++++++++
>  drivers/net/ixgbe/ixgbe_ipsec.h        | 147 +++++++
>  drivers/net/ixgbe/ixgbe_rxtx.c         |  53 ++-
>  drivers/net/ixgbe/ixgbe_rxtx.h         |  11 +-
>  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c |  50 ++-
>  11 files changed, 1079 insertions(+), 10 deletions(-)
>  create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.c
>  create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.h
> 
> diff --git a/drivers/net/Makefile b/drivers/net/Makefile
> index 5d2ad2f..339ff36 100644
> --- a/drivers/net/Makefile
> +++ b/drivers/net/Makefile
> @@ -68,7 +68,7 @@ DEPDIRS-fm10k = $(core-libs) librte_hash
>  DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
>  DEPDIRS-i40e = $(core-libs) librte_hash
>  DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
> -DEPDIRS-ixgbe = $(core-libs) librte_hash
> +DEPDIRS-ixgbe = $(core-libs) librte_hash librte_security
>  DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
>  DEPDIRS-liquidio = $(core-libs)
>  DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
> diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
> index 95c806d..6e963c7 100644
> --- a/drivers/net/ixgbe/Makefile
> +++ b/drivers/net/ixgbe/Makefile
> @@ -118,11 +118,11 @@ SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
>  else
>  SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
>  endif
> -
>  ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
>  SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
>  endif
> +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
> 
> diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
> index 4aab278..b132a0f 100644
> --- a/drivers/net/ixgbe/base/ixgbe_osdep.h
> +++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
> @@ -161,4 +161,12 @@ static inline uint32_t ixgbe_read_addr(volatile void* addr)
>  #define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
>  	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
> 
> +#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms)	\
> +{									\
> +	uint32_t cnt = poll_ms;						\
> +	IXGBE_WRITE_REG(hw, (reg), (val));				\
> +	while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--))	\
> +		rte_delay_ms(1);					\
> +}
> +

As you have a macro that consists of multiple statements, you'll need a do { ... } while (0) wrapper
around it.
Though I still suggest making it an inline function - that would be much better.
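
Something along these lines, for illustration (untested sketch; the inline
function name is made up):

#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms)	\
do {									\
	uint32_t cnt = poll_ms;						\
	IXGBE_WRITE_REG(hw, (reg), (val));				\
	while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--))	\
		rte_delay_ms(1);					\
} while (0)

or, as an inline function:

static inline void
ixgbe_write_reg_then_poll_mask(struct ixgbe_hw *hw, uint32_t reg,
			       uint32_t val, uint32_t mask, uint32_t poll_ms)
{
	uint32_t cnt = poll_ms;

	IXGBE_WRITE_REG(hw, reg, val);
	while ((IXGBE_READ_REG(hw, reg) & mask) && cnt-- > 0)
		rte_delay_ms(1);
}

The do/while version keeps the macro safe inside an if/else without braces;
the inline function additionally gives you type checking of the arguments.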

>  #endif /* _IXGBE_OS_H_ */
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 14b9c53..fcabd5e 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -61,6 +61,7 @@
>  #include <rte_random.h>
>  #include <rte_dev.h>
>  #include <rte_hash_crc.h>
> +#include <rte_security_driver.h>
> 
>  #include "ixgbe_logs.h"
>  #include "base/ixgbe_api.h"
> @@ -1132,6 +1133,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>  		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
>  	struct ixgbe_bw_conf *bw_conf =
>  		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
> +	struct rte_security_ctx *security_instance;
>  	uint32_t ctrl_ext;
>  	uint16_t csum;
>  	int diag, i;
> @@ -1139,6 +1141,17 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>  	PMD_INIT_FUNC_TRACE();
> 
>  	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
> +	security_instance = rte_malloc("rte_security_instances_ops",
> +				sizeof(struct rte_security_ctx), 0);
> +	if (security_instance == NULL)
> +		return -ENOMEM;
> +	security_instance->state = RTE_SECURITY_INSTANCE_VALID;
> +	security_instance->device = (void *)eth_dev;
> +	security_instance->ops = &ixgbe_security_ops;
> +	security_instance->sess_cnt = 0;
> +
> +	eth_dev->data->security_ctx = security_instance;
> +
>  	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
>  	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
>  	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
> @@ -1169,6 +1182,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
> 
>  	rte_eth_copy_pci_info(eth_dev, pci_dev);
>  	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
> +	eth_dev->data->dev_flags |= RTE_ETH_DEV_SECURITY;
> 
>  	/* Vendor and Device ID need to be set before init of shared code */
>  	hw->device_id = pci_dev->id.device_id;
> @@ -1401,6 +1415,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
>  	/* Remove all Traffic Manager configuration */
>  	ixgbe_tm_conf_uninit(eth_dev);
> 
> +	rte_free(eth_dev->data->security_ctx);
> +
>  	return 0;
>  }
> 
> @@ -3695,6 +3711,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  	    hw->mac.type == ixgbe_mac_X550EM_a)
>  		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
> 
> +	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> +	dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
> +
>  	dev_info->default_rxconf = (struct rte_eth_rxconf) {
>  		.rx_thresh = {
>  			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
> index e28c856..f5b52c4 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.h
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.h
> @@ -38,6 +38,7 @@
>  #include "base/ixgbe_dcb_82599.h"
>  #include "base/ixgbe_dcb_82598.h"
>  #include "ixgbe_bypass.h"
> +#include "ixgbe_ipsec.h"
>  #include <rte_time.h>
>  #include <rte_hash.h>
>  #include <rte_pci.h>
> @@ -486,7 +487,7 @@ struct ixgbe_adapter {
>  	struct ixgbe_filter_info    filter;
>  	struct ixgbe_l2_tn_info     l2_tn;
>  	struct ixgbe_bw_conf        bw_conf;
> -
> +	struct ixgbe_ipsec          ipsec;
>  	bool rx_bulk_alloc_allowed;
>  	bool rx_vec_allowed;
>  	struct rte_timecounter      systime_tc;
> @@ -543,6 +544,9 @@ struct ixgbe_adapter {
>  #define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
>  	(&((struct ixgbe_adapter *)adapter)->tm_conf)
> 
> +#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
> +	(&((struct ixgbe_adapter *)adapter)->ipsec)
> +
>  /*
>   * RX/TX function prototypes
>   */
> diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
> index 904c146..13c8243 100644
> --- a/drivers/net/ixgbe/ixgbe_flow.c
> +++ b/drivers/net/ixgbe/ixgbe_flow.c
> @@ -187,6 +187,9 @@ const struct rte_flow_action *next_no_void_action(
>   * END
>   * other members in mask and spec should set to 0x00.
>   * item->last should be NULL.
> + *
> + * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
> + *
>   */
>  static int
>  cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
> @@ -226,6 +229,41 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
>  		return -rte_errno;
>  	}
> 
> +	/**
> +	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
> +	 */
> +	act = next_no_void_action(actions, NULL);
> +	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
> +		const void *conf = act->conf;
> +		/* check if the next not void action is END */
> +		act = next_no_void_action(actions, act);
> +		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> +			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION,
> +				act, "Not supported action.");
> +			return -rte_errno;
> +		}
> +
> +		/* get the IP pattern*/
> +		item = next_no_void_pattern(pattern, NULL);
> +		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
> +				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
> +			if (item->last ||
> +					item->type == RTE_FLOW_ITEM_TYPE_END) {
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM,
> +					item, "IP pattern missing.");
> +				return -rte_errno;
> +			}
> +			item = next_no_void_pattern(pattern, item);
> +		}
> +
> +		filter->proto = IPPROTO_ESP;
> +		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
> +					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
> +	}
> +
>  	/* the first not void item can be MAC or IPv4 */
>  	item = next_no_void_pattern(pattern, NULL);
> 
> @@ -519,6 +557,10 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
>  	if (ret)
>  		return ret;
> 
> +	/* ESP flow is not really a flow*/
> +	if (filter->proto == IPPROTO_ESP)
> +		return 0;
> +
>  	/* Ixgbe doesn't support tcp flags. */
>  	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
>  		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> @@ -2758,6 +2800,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
>  	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
>  	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
>  			actions, &ntuple_filter, error);
> +
> +	/* ESP flow is not really a flow*/
> +	if (ntuple_filter.proto == IPPROTO_ESP)
> +		return flow;
> +
>  	if (!ret) {
>  		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
>  		if (!ret) {
> diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
> new file mode 100644
> index 0000000..6ace305
> --- /dev/null
> +++ b/drivers/net/ixgbe/ixgbe_ipsec.c
> @@ -0,0 +1,744 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
> + *   All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Intel Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <rte_ethdev.h>
> +#include <rte_ethdev_pci.h>
> +#include <rte_ip.h>
> +#include <rte_jhash.h>
> +#include <rte_security_driver.h>
> +#include <rte_cryptodev.h>
> +#include <rte_flow.h>
> +
> +#include "base/ixgbe_type.h"
> +#include "base/ixgbe_api.h"
> +#include "ixgbe_ethdev.h"
> +#include "ixgbe_ipsec.h"
> +
> +#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS  5
> +
> +#define IXGBE_WAIT_RREAD \
> +	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
> +	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
> +#define IXGBE_WAIT_RWRITE \
> +	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
> +	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
> +#define IXGBE_WAIT_TREAD \
> +	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
> +	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
> +#define IXGBE_WAIT_TWRITE \
> +	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
> +	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
> +
> +#define CMP_IP(a, b) (\
> +	(a).ipv6[0] == (b).ipv6[0] && \
> +	(a).ipv6[1] == (b).ipv6[1] && \
> +	(a).ipv6[2] == (b).ipv6[2] && \
> +	(a).ipv6[3] == (b).ipv6[3])
> +
> +
> +static void
> +ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
> +{
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	int i = 0;
> +
> +	/* clear Rx IP table*/
> +	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
> +		uint16_t index = i << 3;
> +		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
> +		IXGBE_WAIT_RWRITE;
> +	}
> +
> +	/* clear Rx SPI and Rx/Tx SA tables*/
> +	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +		uint32_t index = i << 3;
> +		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
> +		IXGBE_WAIT_RWRITE;
> +		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
> +		IXGBE_WAIT_RWRITE;
> +		reg_val = IPSRXIDX_WRITE | index;
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
> +		IXGBE_WAIT_TWRITE;
> +	}
> +}
> +
> +static int
> +ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
> +{
> +	struct rte_eth_dev *dev = ic_session->dev;
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
> +			dev->data->dev_private);
> +	uint32_t reg_val;
> +	int sa_index = -1;
> +
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
> +		int i, ip_index = -1;
> +
> +		/* Find a match in the IP table*/
> +		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
> +			if (CMP_IP(priv->rx_ip_tbl[i].ip,
> +				   ic_session->dst_ip)) {
> +				ip_index = i;
> +				break;
> +			}
> +		}
> +		/* If no match, find a free entry in the IP table*/
> +		if (ip_index < 0) {
> +			for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
> +				if (priv->rx_ip_tbl[i].ref_count == 0) {
> +					ip_index = i;
> +					break;
> +				}
> +			}
> +		}
> +
> +		/* Fail if no match and no free entries*/
> +		if (ip_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "No free entry left in the Rx IP table\n");
> +			return -1;
> +		}
> +
> +		/* Find a free entry in the SA table*/
> +		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +			if (priv->rx_sa_tbl[i].used == 0) {
> +				sa_index = i;
> +				break;
> +			}
> +		}
> +		/* Fail if no free entries*/
> +		if (sa_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "No free entry left in the Rx SA table\n");
> +			return -1;
> +		}
> +
> +		priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
> +				ic_session->dst_ip.ipv6[0];
> +		priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
> +				ic_session->dst_ip.ipv6[1];
> +		priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
> +				ic_session->dst_ip.ipv6[2];
> +		priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
> +				ic_session->dst_ip.ipv6[3];
> +		priv->rx_ip_tbl[ip_index].ref_count++;
> +
> +		priv->rx_sa_tbl[sa_index].spi =
> +			rte_cpu_to_be_32(ic_session->spi);
> +		priv->rx_sa_tbl[sa_index].ip_index = ip_index;
> +		priv->rx_sa_tbl[sa_index].key[3] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
> +		priv->rx_sa_tbl[sa_index].key[2] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
> +		priv->rx_sa_tbl[sa_index].key[1] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
> +		priv->rx_sa_tbl[sa_index].key[0] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
> +		priv->rx_sa_tbl[sa_index].salt =
> +			rte_cpu_to_be_32(ic_session->salt);
> +		priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
> +		if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
> +			priv->rx_sa_tbl[sa_index].mode |=
> +					(IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
> +		if (ic_session->dst_ip.type == IPv6)
> +			priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
> +		priv->rx_sa_tbl[sa_index].used = 1;
> +
> +		/* write IP table entry*/
> +		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
> +				IPSRXIDX_TABLE_IP | (ip_index << 3);
> +		if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
> +					priv->rx_ip_tbl[ip_index].ip.ipv4);
> +		} else {
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
> +					priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
> +					priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
> +					priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
> +					priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
> +		}
> +		IXGBE_WAIT_RWRITE;
> +
> +		/* write SPI table entry*/
> +		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
> +				IPSRXIDX_TABLE_SPI | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
> +				priv->rx_sa_tbl[sa_index].spi);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
> +				priv->rx_sa_tbl[sa_index].ip_index);
> +		IXGBE_WAIT_RWRITE;
> +
> +		/* write Key table entry*/
> +		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
> +				IPSRXIDX_TABLE_KEY | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
> +				priv->rx_sa_tbl[sa_index].key[0]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
> +				priv->rx_sa_tbl[sa_index].key[1]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
> +				priv->rx_sa_tbl[sa_index].key[2]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
> +				priv->rx_sa_tbl[sa_index].key[3]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
> +				priv->rx_sa_tbl[sa_index].salt);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
> +				priv->rx_sa_tbl[sa_index].mode);
> +		IXGBE_WAIT_RWRITE;
> +
> +	} else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
> +		int i;
> +
> +		/* Find a free entry in the SA table*/
> +		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +			if (priv->tx_sa_tbl[i].used == 0) {
> +				sa_index = i;
> +				break;
> +			}
> +		}
> +		/* Fail if no free entries*/
> +		if (sa_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "No free entry left in the Tx SA table\n");
> +			return -1;
> +		}
> +
> +		priv->tx_sa_tbl[sa_index].spi =
> +			rte_cpu_to_be_32(ic_session->spi);
> +		priv->tx_sa_tbl[sa_index].key[3] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
> +		priv->tx_sa_tbl[sa_index].key[2] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
> +		priv->tx_sa_tbl[sa_index].key[1] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
> +		priv->tx_sa_tbl[sa_index].key[0] =
> +			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
> +		priv->tx_sa_tbl[sa_index].salt =
> +			rte_cpu_to_be_32(ic_session->salt);
> +
> +		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
> +				priv->tx_sa_tbl[sa_index].key[0]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
> +				priv->tx_sa_tbl[sa_index].key[1]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
> +				priv->tx_sa_tbl[sa_index].key[2]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
> +				priv->tx_sa_tbl[sa_index].key[3]);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
> +				priv->tx_sa_tbl[sa_index].salt);
> +		IXGBE_WAIT_TWRITE;
> +
> +		priv->tx_sa_tbl[sa_index].used = 1;
> +		ic_session->sa_index = sa_index;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
> +		       struct ixgbe_crypto_session *ic_session)
> +{
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ixgbe_ipsec *priv =
> +			IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
> +	uint32_t reg_val;
> +	int sa_index = -1;
> +
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
> +		int i, ip_index = -1;
> +
> +		/* Find a match in the IP table*/
> +		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
> +			if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
> +				ip_index = i;
> +				break;
> +			}
> +		}
> +
> +		/* Fail if no match*/
> +		if (ip_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "Entry not found in the Rx IP table\n");
> +			return -1;
> +		}
> +
> +		/* Find a match in the SA table*/
> +		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +			if (priv->rx_sa_tbl[i].spi ==
> +				  rte_cpu_to_be_32(ic_session->spi)) {
> +				sa_index = i;
> +				break;
> +			}
> +		}
> +		/* Fail if no match*/
> +		if (sa_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "Entry not found in the Rx SA table\n");
> +			return -1;
> +		}
> +
> +		/* Disable and clear Rx SPI and key table entries*/
> +		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
> +		IXGBE_WAIT_RWRITE;
> +		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
> +		IXGBE_WAIT_RWRITE;
> +		priv->rx_sa_tbl[sa_index].used = 0;
> +
> +		/* If last used then clear the IP table entry*/
> +		priv->rx_ip_tbl[ip_index].ref_count--;
> +		if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
> +			reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
> +					(ip_index << 3);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
> +			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
> +		}
> +	} else { /* session->dir == RTE_CRYPTO_OUTBOUND */
> +		int i;
> +
> +		/* Find a match in the SA table*/
> +		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
> +			if (priv->tx_sa_tbl[i].spi ==
> +				    rte_cpu_to_be_32(ic_session->spi)) {
> +				sa_index = i;
> +				break;
> +			}
> +		}
> +		/* Fail if no matching entry*/
> +		if (sa_index < 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "Entry not found in the Tx SA table\n");
> +			return -1;
> +		}
> +		reg_val = IPSRXIDX_WRITE | (sa_index << 3);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
> +		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
> +		IXGBE_WAIT_TWRITE;
> +
> +		priv->tx_sa_tbl[sa_index].used = 0;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ixgbe_crypto_create_session(void *device,
> +		struct rte_security_session_conf *conf,
> +		struct rte_security_session *session,
> +		struct rte_mempool *mempool)
> +{
> +	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
> +	struct ixgbe_crypto_session *ic_session = NULL;
> +	struct rte_crypto_aead_xform *aead_xform;
> +	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
> +
> +	if (rte_mempool_get(mempool, (void **)&ic_session)) {
> +		PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
> +		return -ENOMEM;
> +	}
> +
> +	if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
> +			conf->crypto_xform->aead.algo !=
> +					RTE_CRYPTO_AEAD_AES_GCM) {
> +		PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
> +		return -ENOTSUP;
> +	}
> +	aead_xform = &conf->crypto_xform->aead;
> +
> +	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
> +		if (dev_conf->rxmode.enable_sec) {
> +			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
> +		} else {
> +			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
> +			return -ENOTSUP;
> +		}
> +	} else {
> +		if (dev_conf->txmode.enable_sec) {
> +			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
> +		} else {
> +			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
> +			return -ENOTSUP;
> +		}
> +	}
> +
> +	ic_session->key = aead_xform->key.data;
> +	memcpy(&ic_session->salt,
> +	       &aead_xform->key.data[aead_xform->key.length], 4);
> +	ic_session->spi = conf->ipsec.spi;
> +	ic_session->dev = eth_dev;
> +
> +	set_sec_session_private_data(session, ic_session);
> +
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
> +		if (ixgbe_crypto_add_sa(ic_session)) {
> +			PMD_DRV_LOG(ERR, "Failed to add SA\n");
> +			return -EPERM;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ixgbe_crypto_remove_session(void *device,
> +		struct rte_security_session *session)
> +{
> +	struct rte_eth_dev *eth_dev = device;
> +	struct ixgbe_crypto_session *ic_session =
> +		(struct ixgbe_crypto_session *)
> +		get_sec_session_private_data(session);
> +	struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
> +
> +	if (eth_dev != ic_session->dev) {
> +		PMD_DRV_LOG(ERR, "Session not bound to this device\n");
> +		return -ENODEV;
> +	}
> +
> +	if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
> +		PMD_DRV_LOG(ERR, "Failed to remove session\n");
> +		return -EFAULT;
> +	}
> +
> +	rte_mempool_put(mempool, (void *)ic_session);
> +
> +	return 0;
> +}
> +
> +static int
> +ixgbe_crypto_update_mb(void *device __rte_unused,
> +		struct rte_security_session *session,
> +		       struct rte_mbuf *m, void *params __rte_unused)
> +{
> +	struct ixgbe_crypto_session *ic_session =
> +			get_sec_session_private_data(session);
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
> +		struct ixgbe_crypto_tx_desc_md *mdata =
> +			(struct ixgbe_crypto_tx_desc_md *)&m->udata64;
> +		mdata->enc = 1;
> +		mdata->sa_idx = ic_session->sa_index;
> +		mdata->pad_len = *rte_pktmbuf_mtod_offset(m,
> +			uint8_t *, rte_pktmbuf_pkt_len(m) - 18) + 18;

Could you explain what pad_len is supposed to contain?
Also, what is the magic constant '18'?
Could you create some macros if needed?

> +	}
> +	return 0;
> +}
> +
> +struct rte_cryptodev_capabilities aes_gmac_crypto_capabilities[] = {
> +	{	/* AES GMAC (128-bit) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{
> +		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
> +		}, }
> +	},
> +};
> +
> +struct rte_cryptodev_capabilities aes_gcm_gmac_crypto_capabilities[] = {
> +	{	/* AES GMAC (128-bit) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{	/* AES GCM (128-bit) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +			{.aead = {
> +				.algo = RTE_CRYPTO_AEAD_AES_GCM,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 8,
> +					.max = 16,
> +					.increment = 4
> +				},
> +				.aad_size = {
> +					.min = 0,
> +					.max = 65535,
> +					.increment = 1
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{
> +		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
> +		}, }
> +	},
> +};
> +
> +static const struct rte_security_capability ixgbe_security_capabilities[] = {
> +	{ /* IPsec Inline Crypto ESP Transport Egress */
> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +		.ipsec = {
> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
> +			.options = { 0 }
> +		},
> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
> +		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
> +	},
> +	{ /* IPsec Inline Crypto ESP Transport Ingress */
> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +		.ipsec = {
> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
> +			.options = { 0 }
> +		},
> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
> +		.ol_flags = 0
> +	},
> +	{ /* IPsec Inline Crypto ESP Tunnel Egress */
> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +		.ipsec = {
> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
> +			.options = { 0 }
> +		},
> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
> +		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
> +	},
> +	{ /* IPsec Inline Crypto ESP Tunnel Ingress */
> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +		.ipsec = {
> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
> +			.options = { 0 }
> +		},
> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
> +		.ol_flags = 0
> +	},
> +	{
> +		.action = RTE_SECURITY_ACTION_TYPE_NONE
> +	}
> +};
> +
> +static const struct rte_security_capability *
> +ixgbe_crypto_capabilities_get(void *device __rte_unused)
> +{

As a nit: if ixgbe_security_capabilities is not used in any other place,
you can move its definition inside that function.
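I.e. roughly (just a sketch):

static const struct rte_security_capability *
ixgbe_crypto_capabilities_get(void *device __rte_unused)
{
	static const struct rte_security_capability caps[] = {
		/* ... the same entries as ixgbe_security_capabilities ... */
		{ .action = RTE_SECURITY_ACTION_TYPE_NONE }
	};

	return caps;
}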

> +	return ixgbe_security_capabilities;
> +}
> +
> +
> +int
> +ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
> +{
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	uint32_t reg;
> +
> +	/* sanity checks */
> +	if (dev->data->dev_conf.rxmode.enable_lro) {
> +		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
> +		return -1;
> +	}
> +	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
> +		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
> +		return -1;
> +	}
> +
> +
> +	/* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/
> +	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
> +
> +	/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
> +	 * hang will occur with heavy traffic.
> +	 */
> +	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
> +	reg = (reg & 0xFFFFFFF0) | 0x3;
> +	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
> +
> +	reg  = IXGBE_READ_REG(hw, IXGBE_HLREG0);
> +	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
> +	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
> +
> +	if (dev->data->dev_conf.rxmode.enable_sec) {
> +		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
> +		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
> +		if (reg != 0) {
> +			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
> +			return -1;
> +		}
> +	}
> +	if (dev->data->dev_conf.txmode.enable_sec) {
> +		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
> +				IXGBE_SECTXCTRL_STORE_FORWARD);
> +		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
> +		if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
> +			PMD_DRV_LOG(ERR, "Error enabling Tx Crypto");
> +			return -1;
> +		}
> +	}
> +
> +	ixgbe_crypto_clear_ipsec_tables(dev);
> +
> +	return 0;
> +}
> +
> +int
> +ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
> +				      const void *ip_spec,
> +				      uint8_t is_ipv6)
> +{
> +	struct ixgbe_crypto_session *ic_session
> +		= get_sec_session_private_data(sess);
> +
> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
> +		if (is_ipv6) {
> +			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
> +			ic_session->src_ip.type = IPv6;
> +			ic_session->dst_ip.type = IPv6;
> +			rte_memcpy(ic_session->src_ip.ipv6,
> +				   ipv6->hdr.src_addr, 16);
> +			rte_memcpy(ic_session->dst_ip.ipv6,
> +				   ipv6->hdr.dst_addr, 16);
> +		} else {
> +			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
> +			ic_session->src_ip.type = IPv4;
> +			ic_session->dst_ip.type = IPv4;
> +			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
> +			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
> +		}
> +		return ixgbe_crypto_add_sa(ic_session);
> +	}
> +
> +	return 0;
> +}
> +
> +
> +struct rte_security_ops ixgbe_security_ops = {
> +	.session_create = ixgbe_crypto_create_session,
> +	.session_update = NULL,
> +	.session_stats_get = NULL,
> +	.session_destroy = ixgbe_crypto_remove_session,
> +
> +	.set_pkt_metadata = ixgbe_crypto_update_mb,
> +
> +	.capabilities_get = ixgbe_crypto_capabilities_get
> +};
> diff --git a/drivers/net/ixgbe/ixgbe_ipsec.h b/drivers/net/ixgbe/ixgbe_ipsec.h
> new file mode 100644
> index 0000000..9f06235
> --- /dev/null
> +++ b/drivers/net/ixgbe/ixgbe_ipsec.h
> @@ -0,0 +1,147 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
> + *   All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Intel Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#ifndef IXGBE_IPSEC_H_
> +#define IXGBE_IPSEC_H_
> +
> +#include <rte_security.h>
> +
> +#define IPSRXIDX_RX_EN                                    0x00000001
> +#define IPSRXIDX_TABLE_IP                                 0x00000002
> +#define IPSRXIDX_TABLE_SPI                                0x00000004
> +#define IPSRXIDX_TABLE_KEY                                0x00000006
> +#define IPSRXIDX_WRITE                                    0x80000000
> +#define IPSRXIDX_READ                                     0x40000000
> +#define IPSRXMOD_VALID                                    0x00000001
> +#define IPSRXMOD_PROTO                                    0x00000004
> +#define IPSRXMOD_DECRYPT                                  0x00000008
> +#define IPSRXMOD_IPV6                                     0x00000010
> +#define IXGBE_ADVTXD_POPTS_IPSEC                          0x00000400
> +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP                 0x00002000
> +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN               0x00004000
> +#define IXGBE_RXDADV_IPSEC_STATUS_SECP                    0x00020000
> +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK                 0x18000000
> +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL         0x08000000
> +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH           0x10000000
> +#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED    0x18000000
> +
> +#define IPSEC_MAX_RX_IP_COUNT           128
> +#define IPSEC_MAX_SA_COUNT              1024
> +
> +enum ixgbe_operation {
> +	IXGBE_OP_AUTHENTICATED_ENCRYPTION,
> +	IXGBE_OP_AUTHENTICATED_DECRYPTION
> +};
> +
> +enum ixgbe_gcm_key {
> +	IXGBE_GCM_KEY_128,
> +	IXGBE_GCM_KEY_256
> +};
> +
> +/**
> + * Generic IP address structure
> + * TODO: Find a better location for this, possibly rte_net.h.
> + **/
> +struct ipaddr {
> +	enum ipaddr_type {
> +		IPv4,
> +		IPv6
> +	} type;
> +	/**< IP Address Type - IPv4/IPv6 */
> +
> +	union {
> +		uint32_t ipv4;
> +		uint32_t ipv6[4];
> +	};
> +};
> +
> +/** inline crypto private session structure */
> +struct ixgbe_crypto_session {
> +	enum ixgbe_operation op;
> +	uint8_t *key;
> +	uint32_t salt;
> +	uint32_t sa_index;
> +	uint32_t spi;
> +	struct ipaddr src_ip;
> +	struct ipaddr dst_ip;
> +	struct rte_eth_dev *dev;
> +} __rte_cache_aligned;
> +
> +struct ixgbe_crypto_rx_ip_table {
> +	struct ipaddr ip;
> +	uint16_t ref_count;
> +};
> +struct ixgbe_crypto_rx_sa_table {
> +	uint32_t spi;
> +	uint32_t ip_index;
> +	uint32_t key[4];
> +	uint32_t salt;
> +	uint8_t  mode;
> +	uint8_t  used;
> +};
> +
> +struct ixgbe_crypto_tx_sa_table {
> +	uint32_t spi;
> +	uint32_t key[4];
> +	uint32_t salt;
> +	uint8_t  used;
> +};
> +
> +struct ixgbe_crypto_tx_desc_md {
> +	union {
> +		uint64_t data;
> +		struct {
> +			  uint32_t sa_idx;
> +			  uint8_t pad_len;
> +			  uint8_t enc;
> +		};
> +	};
> +};


Why not just:
union ixgbe_crypto_tx_desc_md {
     uint64_t data;
     struct {...};
};
?
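
With the fields filled in from the struct above, that would be (sketch):

union ixgbe_crypto_tx_desc_md {
	uint64_t data;
	struct {
		uint32_t sa_idx;
		uint8_t pad_len;
		uint8_t enc;
	};
};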

> +
> +struct ixgbe_ipsec {
> +	struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
> +	struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
> +	struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
> +};
> +
> +extern struct rte_security_ops ixgbe_security_ops;
> +
> +
> +int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
> +int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
> +					  const void *ip_spec,
> +					  uint8_t is_ipv6);
> +
> +
> +
> +#endif /*IXGBE_IPSEC_H_*/
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
> index 0038dfb..279e3fa 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -93,6 +93,7 @@
>  		PKT_TX_TCP_SEG |		 \
>  		PKT_TX_MACSEC |			 \
>  		PKT_TX_OUTER_IP_CKSUM |		 \
> +		PKT_TX_SEC_OFFLOAD |	 \
>  		IXGBE_TX_IEEE1588_TMST)
> 
>  #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
> @@ -395,7 +396,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>  static inline void
>  ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
>  		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
> -		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
> +		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
> +		struct rte_mbuf *mb)

I don't think you need to pass mb as a parameter to that function:
you already have ol_flags as a parameter, and all you need here is just
struct ixgbe_crypto_tx_desc_md md as an extra parameter.
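I.e. a prototype along the lines of (just a sketch):

static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
		struct ixgbe_crypto_tx_desc_md md);

The PKT_TX_SEC_OFFLOAD branch below could then use md.sa_idx, md.pad_len and
md.enc directly instead of casting mb->udata64.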

>  {
>  	uint32_t type_tucmd_mlhl;
>  	uint32_t mss_l4len_idx = 0;
> @@ -479,6 +481,18 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
>  		seqnum_seed |= tx_offload.l2_len
>  			       << IXGBE_ADVTXD_TUNNEL_LEN;
>  	}
> +	if (mb->ol_flags & PKT_TX_SEC_OFFLOAD) {
> +		struct ixgbe_crypto_tx_desc_md *mdata =
> +				(struct ixgbe_crypto_tx_desc_md *)
> +				&mb->udata64;
> +		seqnum_seed |=
> +			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
> +		type_tucmd_mlhl |= mdata->enc ?
> +				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
> +				IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
> +		type_tucmd_mlhl |=
> +			(mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);

Shouldn't we also update tx_offload_mask here?
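I'd expect something along the lines of the other offload fields here
(a sketch, using the sa_idx/sec_pad_len fields added to union
ixgbe_tx_offload):

	tx_offload_mask.sa_idx |= ~0;
	tx_offload_mask.sec_pad_len |= ~0;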

> +	}
> 
>  	txq->ctx_cache[ctx_idx].flags = ol_flags;
>  	txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
> @@ -657,6 +671,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  	uint32_t ctx = 0;
>  	uint32_t new_ctx;
>  	union ixgbe_tx_offload tx_offload;
> +	uint8_t use_ipsec;
> 
>  	tx_offload.data[0] = 0;
>  	tx_offload.data[1] = 0;
> @@ -684,6 +699,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		 * are needed for offload functionality.
>  		 */
>  		ol_flags = tx_pkt->ol_flags;
> +		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
> 
>  		/* If hardware offload required */
>  		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
> @@ -695,6 +711,13 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  			tx_offload.tso_segsz = tx_pkt->tso_segsz;
>  			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
>  			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
> +			if (use_ipsec) {
> +				struct ixgbe_crypto_tx_desc_md *ipsec_mdata =
> +					(struct ixgbe_crypto_tx_desc_md *)
> +							&tx_pkt->udata64;
> +				tx_offload.sa_idx = ipsec_mdata->sa_idx;
> +				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
> +			}
> 
>  			/* If new context need be built or reuse the exist ctx. */
>  			ctx = what_advctx_update(txq, tx_ol_req,
> @@ -855,7 +878,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  				}
> 
>  				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
> -					tx_offload);
> +					tx_offload, tx_pkt);
> 
>  				txe->last_id = tx_last;
>  				tx_id = txe->next_id;
> @@ -873,6 +896,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		}
> 
>  		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
> +		if (use_ipsec)
> +			olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
> 
>  		m_seg = tx_pkt;
>  		do {
> @@ -1447,6 +1472,12 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
>  		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
>  	}
> 
> +	if (rx_status & IXGBE_RXD_STAT_SECP) {
> +		pkt_flags |= PKT_RX_SEC_OFFLOAD;
> +		if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
> +			pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
> +	}
> +
>  	return pkt_flags;
>  }
> 
> @@ -2364,8 +2395,9 @@ void __attribute__((cold))
>  ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
>  {
>  	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
> -	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
> -			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
> +	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
> +			(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) &&
> +			!(dev->data->dev_conf.txmode.enable_sec)) {
>  		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
>  		dev->tx_pkt_prepare = NULL;
>  #ifdef RTE_IXGBE_INC_VECTOR
> @@ -2535,6 +2567,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  	txq->txq_flags = tx_conf->txq_flags;
>  	txq->ops = &def_txq_ops;
>  	txq->tx_deferred_start = tx_conf->tx_deferred_start;
> +	txq->using_ipsec = dev->data->dev_conf.txmode.enable_sec;
> 
>  	/*
>  	 * Modification to set VFTDT for virtual function if vf is detected
> @@ -4519,6 +4552,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
>  		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
> 
>  		rxq->rx_using_sse = rx_using_sse;
> +		rxq->using_ipsec = dev->data->dev_conf.rxmode.enable_sec;
>  	}
>  }
> 
> @@ -5006,6 +5040,17 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
>  			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
>  		ixgbe_setup_loopback_link_82599(hw);
> 
> +	if (dev->data->dev_conf.rxmode.enable_sec ||
> +			dev->data->dev_conf.txmode.enable_sec) {
> +		ret = ixgbe_crypto_enable_ipsec(dev);
> +		if (ret != 0) {
> +			PMD_DRV_LOG(ERR,
> +				    "ixgbe_crypto_enable_ipsec fails with %d.",
> +				    ret);
> +			return ret;
> +		}
> +	}
> +
>  	return 0;
>  }
> 
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
> index 81c527f..4017831 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> @@ -138,8 +138,10 @@ struct ixgbe_rx_queue {
>  	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
>  	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
>  	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
> -	uint16_t            rx_using_sse;
> +	uint8_t            rx_using_sse;
>  	/**< indicates that vector RX is in use */
> +	uint8_t            using_ipsec;
> +	/**< indicates that IPsec RX feature is in use */
>  #ifdef RTE_IXGBE_INC_VECTOR
>  	uint16_t            rxrearm_nb;     /**< number of remaining to be re-armed */
>  	uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
> @@ -183,6 +185,10 @@ union ixgbe_tx_offload {
>  		/* fields for TX offloading of tunnels */
>  		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
>  		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
> +
> +		/* inline ipsec related*/
> +		uint64_t sa_idx:8;	/**< TX SA database entry index */
> +		uint64_t sec_pad_len:4;	/**< padding length */
>  	};
>  };
> 
> @@ -247,6 +253,9 @@ struct ixgbe_tx_queue {
>  	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
>  	const struct ixgbe_txq_ops *ops;       /**< txq ops */
>  	uint8_t             tx_deferred_start; /**< not in global dev start. */
> +	uint8_t		    using_ipsec;
> +	/**< indicates that IPsec TX feature is in use */
> +
>  };
> 
>  struct ixgbe_txq_ops {
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
> index e704a7f..c9b1e2e 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
> @@ -124,10 +124,12 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
> 
>  static inline void
>  desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
> -	struct rte_mbuf **rx_pkts)
> +	struct rte_mbuf **rx_pkts, uint8_t use_ipsec)
>  {
>  	__m128i ptype0, ptype1, vtag0, vtag1, csum;
>  	__m128i rearm0, rearm1, rearm2, rearm3;
> +	__m128i sterr0, sterr1, sterr2, sterr3;
> +	__m128i tmp1, tmp2, tmp3, tmp4;
> 
>  	/* mask everything except rss type */
>  	const __m128i rsstype_msk = _mm_set_epi16(
> @@ -174,6 +176,41 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
>  		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
>  		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
> 
> +	const __m128i ipsec_sterr_msk = _mm_set_epi32(
> +		0, IXGBE_RXDADV_IPSEC_STATUS_SECP |
> +			IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED,
> +		0, 0);
> +	const __m128i ipsec_proc_msk  = _mm_set_epi32(
> +		0, IXGBE_RXDADV_IPSEC_STATUS_SECP, 0, 0);
> +	const __m128i ipsec_err_flag  = _mm_set_epi32(
> +		0, PKT_RX_SEC_OFFLOAD_FAILED | PKT_RX_SEC_OFFLOAD,
> +		0, 0);
> +	const __m128i ipsec_proc_flag = _mm_set_epi32(
> +		0, PKT_RX_SEC_OFFLOAD, 0, 0);
> +
> +	if (use_ipsec) {
> +		sterr0 = _mm_and_si128(descs[0], ipsec_sterr_msk);
> +		sterr1 = _mm_and_si128(descs[1], ipsec_sterr_msk);
> +		sterr2 = _mm_and_si128(descs[2], ipsec_sterr_msk);
> +		sterr3 = _mm_and_si128(descs[3], ipsec_sterr_msk);
> +		tmp1 = _mm_cmpeq_epi32(sterr0, ipsec_sterr_msk);
> +		tmp2 = _mm_cmpeq_epi32(sterr0, ipsec_proc_msk);
> +		tmp3 = _mm_cmpeq_epi32(sterr1, ipsec_sterr_msk);
> +		tmp4 = _mm_cmpeq_epi32(sterr1, ipsec_proc_msk);
> +		sterr0 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
> +					_mm_and_si128(tmp2, ipsec_proc_flag));
> +		sterr1 = _mm_or_si128(_mm_and_si128(tmp3, ipsec_err_flag),
> +					_mm_and_si128(tmp4, ipsec_proc_flag));
> +		tmp1 = _mm_cmpeq_epi32(sterr2, ipsec_sterr_msk);
> +		tmp2 = _mm_cmpeq_epi32(sterr2, ipsec_proc_msk);
> +		tmp3 = _mm_cmpeq_epi32(sterr3, ipsec_sterr_msk);
> +		tmp4 = _mm_cmpeq_epi32(sterr3, ipsec_proc_msk);
> +		sterr2 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
> +					_mm_and_si128(tmp2, ipsec_proc_flag));
> +		sterr3 = _mm_or_si128(_mm_and_si128(tmp3, ipsec_err_flag),
> +					_mm_and_si128(tmp4, ipsec_proc_flag));
> +	}
> +
>  	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
>  	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
>  	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
> @@ -221,6 +258,13 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
>  	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
>  	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
> 
> +	if (use_ipsec) {
> +		rearm0 = _mm_or_si128(rearm0, sterr0);
> +		rearm1 = _mm_or_si128(rearm1, sterr1);
> +		rearm2 = _mm_or_si128(rearm2, sterr2);
> +		rearm3 = _mm_or_si128(rearm3, sterr3);
> +	}
> +
>  	/* write the rearm data and the olflags in one write */
>  	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
>  			offsetof(struct rte_mbuf, rearm_data) + 8);
> @@ -310,6 +354,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>  	volatile union ixgbe_adv_rx_desc *rxdp;
>  	struct ixgbe_rx_entry *sw_ring;
>  	uint16_t nb_pkts_recd;
> +	uint8_t use_ipsec = rxq->using_ipsec;
>  	int pos;
>  	uint64_t var;
>  	__m128i shuf_msk;
> @@ -471,7 +516,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>  		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
> 
>  		/* set ol_flags with vlan packet type */
> -		desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
> +		desc_to_olflags_v(descs, mbuf_init, vlan_flags,
> +				  &rx_pkts[pos], use_ipsec);
> 
>  		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
>  		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
> --
> 2.9.3
  
Ananyev, Konstantin Oct. 19, 2017, 9:04 a.m. UTC | #4
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 14b9c53..fcabd5e 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -61,6 +61,7 @@
>  #include <rte_random.h>
>  #include <rte_dev.h>
>  #include <rte_hash_crc.h>
> +#include <rte_security_driver.h>
> 
>  #include "ixgbe_logs.h"
>  #include "base/ixgbe_api.h"
> @@ -1132,6 +1133,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>  		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
>  	struct ixgbe_bw_conf *bw_conf =
>  		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
> +	struct rte_security_ctx *security_instance;
>  	uint32_t ctrl_ext;
>  	uint16_t csum;
>  	int diag, i;
> @@ -1139,6 +1141,17 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>  	PMD_INIT_FUNC_TRACE();
> 
>  	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
> +	security_instance = rte_malloc("rte_security_instances_ops",
> +				sizeof(struct rte_security_ctx), 0);
> +	if (security_instance == NULL)
> +		return -ENOMEM;
> +	security_instance->state = RTE_SECURITY_INSTANCE_VALID;
> +	security_instance->device = (void *)eth_dev;
> +	security_instance->ops = &ixgbe_security_ops;
> +	security_instance->sess_cnt = 0;
> +

As another nit - can we move the code above into a separate function
in ixgbe_ipsec.c?
Something like ixgbe_ipsec_ctx_create() or so?
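I.e. (a sketch only; the body is just the code above moved verbatim):

int
ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
{
	struct rte_security_ctx *ctx = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);

	if (ctx == NULL)
		return -ENOMEM;

	ctx->state = RTE_SECURITY_INSTANCE_VALID;
	ctx->device = (void *)dev;
	ctx->ops = &ixgbe_security_ops;
	ctx->sess_cnt = 0;
	dev->data->security_ctx = ctx;

	return 0;
}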
Konstantin

> +	eth_dev->data->security_ctx = security_instance;
> +
>  	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
>  	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
>  	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
> @@ -1169,6 +1182,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
> 
>  	rte_eth_copy_pci_info(eth_dev, pci_dev);
>  	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
> +	eth_dev->data->dev_flags |= RTE_ETH_DEV_SECURITY;
> 
>  	/* Vendor and Device ID need to be set before init of shared code */
>  	hw->device_id = pci_dev->id.device_id;
> @@ -1401,6 +1415,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
>  	/* Remove all Traffic Manager configuration */
>  	ixgbe_tm_conf_uninit(eth_dev);
> 
> +	rte_free(eth_dev->data->security_ctx);
> +
>  	return 0;
>  }
>
  
Radu Nicolau Oct. 19, 2017, 10:51 a.m. UTC | #5
Hi,

Comments inline


On 10/18/2017 10:29 PM, Ananyev, Konstantin wrote:
> Hi Radu,
> Few comments from me below.
> Konstantin
>
>> <snip>
>>
>> +#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms)	\
>> +{									\
>> +	uint32_t cnt = poll_ms;						\
>> +	IXGBE_WRITE_REG(hw, (reg), (val));				\
>> +	while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--))	\
>> +		rte_delay_ms(1);					\
>> +}
>> +
> As you have a macro that consists of multiple statements, you'll need a do { ... } while (0) wrapper
> around it.
> Though I still suggest making it an inline function - that would be much better.
I will add the do-while wrapper, but making it an inline function there
brings in a circular dependency.
>
>> <snip>
>> +
>> +static int
>> +ixgbe_crypto_update_mb(void *device __rte_unused,
>> +		struct rte_security_session *session,
>> +		       struct rte_mbuf *m, void *params __rte_unused)
>> +{
>> +	struct ixgbe_crypto_session *ic_session =
>> +			get_sec_session_private_data(session);
>> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
>> +		struct ixgbe_crypto_tx_desc_md *mdata =
>> +			(struct ixgbe_crypto_tx_desc_md *)&m->udata64;
>> +		mdata->enc = 1;
>> +		mdata->sa_idx = ic_session->sa_index;
>> +		mdata->pad_len = *rte_pktmbuf_mtod_offset(m,
>> +			uint8_t *, rte_pktmbuf_pkt_len(m) - 18) + 18;
> > Could you explain what pad_len is supposed to contain?
> > Also, what is the magic constant '18'?
> > Could you create some macros if needed?
I added an explanation in the code: we read the payload padding size
that is stored at byte len-18 and add 18 bytes, 2 for the ESP trailer
and 16 for the ICV.
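E.g. something along these lines (a sketch; the macro names are made up):

#define IXGBE_ESP_TRAILER_LEN	2	/* pad length + next header bytes */
#define IXGBE_ESP_ICV_LEN	16	/* ICV size for AES-GCM */

mdata->pad_len = *rte_pktmbuf_mtod_offset(m, uint8_t *,
		rte_pktmbuf_pkt_len(m) -
		(IXGBE_ESP_TRAILER_LEN + IXGBE_ESP_ICV_LEN)) +
		(IXGBE_ESP_TRAILER_LEN + IXGBE_ESP_ICV_LEN);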
>> +	}
>> +	return 0;
>> +}
>> +
>> +struct rte_cryptodev_capabilities aes_gmac_crypto_capabilities[] = {
>> +	{	/* AES GMAC (128-bit) */
>> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
>> +		{.sym = {
>> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
>> +			{.auth = {
>> +				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
>> +				.block_size = 16,
>> +				.key_size = {
>> +					.min = 16,
>> +					.max = 16,
>> +					.increment = 0
>> +				},
>> +				.digest_size = {
>> +					.min = 12,
>> +					.max = 12,
>> +					.increment = 0
>> +				},
>> +				.iv_size = {
>> +					.min = 12,
>> +					.max = 12,
>> +					.increment = 0
>> +				}
>> +			}, }
>> +		}, }
>> +	},
>> +	{
>> +		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
>> +		{.sym = {
>> +			.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
>> +		}, }
>> +	},
>> +};
>> +
>> +struct rte_cryptodev_capabilities aes_gcm_gmac_crypto_capabilities[] = {
>> +	{	/* AES GMAC (128-bit) */
>> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
>> +		{.sym = {
>> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
>> +			{.auth = {
>> +				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
>> +				.block_size = 16,
>> +				.key_size = {
>> +					.min = 16,
>> +					.max = 16,
>> +					.increment = 0
>> +				},
>> +				.digest_size = {
>> +					.min = 12,
>> +					.max = 12,
>> +					.increment = 0
>> +				},
>> +				.iv_size = {
>> +					.min = 12,
>> +					.max = 12,
>> +					.increment = 0
>> +				}
>> +			}, }
>> +		}, }
>> +	},
>> +	{	/* AES GCM (128-bit) */
>> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
>> +		{.sym = {
>> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
>> +			{.aead = {
>> +				.algo = RTE_CRYPTO_AEAD_AES_GCM,
>> +				.block_size = 16,
>> +				.key_size = {
>> +					.min = 16,
>> +					.max = 16,
>> +					.increment = 0
>> +				},
>> +				.digest_size = {
>> +					.min = 8,
>> +					.max = 16,
>> +					.increment = 4
>> +				},
>> +				.aad_size = {
>> +					.min = 0,
>> +					.max = 65535,
>> +					.increment = 1
>> +				},
>> +				.iv_size = {
>> +					.min = 12,
>> +					.max = 12,
>> +					.increment = 0
>> +				}
>> +			}, }
>> +		}, }
>> +	},
>> +	{
>> +		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
>> +		{.sym = {
>> +			.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
>> +		}, }
>> +	},
>> +};
>> +
>> +static const struct rte_security_capability ixgbe_security_capabilities[] = {
>> +	{ /* IPsec Inline Crypto ESP Transport Egress */
>> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
>> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
>> +		.ipsec = {
>> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
>> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
>> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
>> +			.options = { 0 }
>> +		},
>> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
>> +		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
>> +	},
>> +	{ /* IPsec Inline Crypto ESP Transport Ingress */
>> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
>> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
>> +		.ipsec = {
>> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
>> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
>> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
>> +			.options = { 0 }
>> +		},
>> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
>> +		.ol_flags = 0
>> +	},
>> +	{ /* IPsec Inline Crypto ESP Tunnel Egress */
>> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
>> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
>> +		.ipsec = {
>> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
>> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
>> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
>> +			.options = { 0 }
>> +		},
>> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
>> +		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
>> +	},
>> +	{ /* IPsec Inline Crypto ESP Tunnel Ingress */
>> +		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
>> +		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
>> +		.ipsec = {
>> +			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
>> +			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
>> +			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
>> +			.options = { 0 }
>> +		},
>> +		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
>> +		.ol_flags = 0
>> +	},
>> +	{
>> +		.action = RTE_SECURITY_ACTION_TYPE_NONE
>> +	}
>> +};
>> +
>> +static const struct rte_security_capability *
>> +ixgbe_crypto_capabilities_get(void *device __rte_unused)
>> +{
> As a nit: if ixgbe_security_capabilities is not used in any other place,
> you can move its definition inside that function.
Done.
>
>> +};
>> +
>> +struct ixgbe_crypto_tx_desc_md {
>> +	union {
>> +		uint64_t data;
>> +		struct {
>> +			  uint32_t sa_idx;
>> +			  uint8_t pad_len;
>> +			  uint8_t enc;
>> +		};
>> +	};
>> +};
>
> Why not just:
> union ixgbe_crypto_tx_desc_md {
>      uint64_t data;
>      struct {...};
> };
> ?
Done.
>
>> +
>> +struct ixgbe_ipsec {
>> +	struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
>> +	struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
>> +	struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
>> +};
>> +
>> +extern struct rte_security_ops ixgbe_security_ops;
>> +
>> +
>> +int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
>> +int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
>> +					  const void *ip_spec,
>> +					  uint8_t is_ipv6);
>> +
>> +
>> +
>> +#endif /*IXGBE_IPSEC_H_*/
>> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
>> index 0038dfb..279e3fa 100644
>> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
>> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
>> @@ -93,6 +93,7 @@
>>   		PKT_TX_TCP_SEG |		 \
>>   		PKT_TX_MACSEC |			 \
>>   		PKT_TX_OUTER_IP_CKSUM |		 \
>> +		PKT_TX_SEC_OFFLOAD |	 \
>>   		IXGBE_TX_IEEE1588_TMST)
>>
>>   #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
>> @@ -395,7 +396,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>>   static inline void
>>   ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
>>   		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
>> -		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
>> +		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
>> +		struct rte_mbuf *mb)
> I don't think you need to pass mb as a parameter to that function:
> you already have ol_flags as a parameter and all you need is just struct ixgbe_crypto_tx_desc_md md
> here as an extra parameter.
Done.
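
A sketch of the reworked prototype, passing just the metadata instead of the
whole mbuf (the exact spelling in the next revision may differ):

static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
		union ixgbe_crypto_tx_desc_md md);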
>
>>   {
>>   	uint32_t type_tucmd_mlhl;
>>   	uint32_t mss_l4len_idx = 0;
>> @@ -479,6 +481,18 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
>>   		seqnum_seed |= tx_offload.l2_len
>>   			       << IXGBE_ADVTXD_TUNNEL_LEN;
>>   	}
>> +	if (mb->ol_flags & PKT_TX_SEC_OFFLOAD) {
>> +		struct ixgbe_crypto_tx_desc_md *mdata =
>> +				(struct ixgbe_crypto_tx_desc_md *)
>> +				&mb->udata64;
>> +		seqnum_seed |=
>> +			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
>> +		type_tucmd_mlhl |= mdata->enc ?
>> +				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
>> +				IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
>> +		type_tucmd_mlhl |=
>> +			(mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
> Shouldn't we also update tx_offload_mask here?
We do - updated.
>
  
Ananyev, Konstantin Oct. 19, 2017, 11:04 a.m. UTC | #6
> >> <snip>
> >> +
> >> +static int
> >> +ixgbe_crypto_update_mb(void *device __rte_unused,
> >> +		struct rte_security_session *session,
> >> +		       struct rte_mbuf *m, void *params __rte_unused)
> >> +{
> >> +	struct ixgbe_crypto_session *ic_session =
> >> +			get_sec_session_private_data(session);
> >> +	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
> >> +		struct ixgbe_crypto_tx_desc_md *mdata =
> >> +			(struct ixgbe_crypto_tx_desc_md *)&m->udata64;
> >> +		mdata->enc = 1;
> >> +		mdata->sa_idx = ic_session->sa_index;
> >> +		mdata->pad_len = *rte_pktmbuf_mtod_offset(m,
> >> +			uint8_t *, rte_pktmbuf_pkt_len(m) - 18) + 18;
> > Could you explain what pad_len is supposed to contain?
> > Also, what is the magical constant '18'?
> > Could you create some macros if needed?
> I added an explanation in the code: we read the payload padding size
> that is stored at len-18 bytes and add 18 bytes, 2 for the ESP trailer
> and 16 for the ICV.

Ok, can we at least have macros for all these constants?
Another question: you do use pkt_len() here - does it mean that multi-segment
packets are not supported by ixgbe-ipsec?
Konstantin
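
For reference, a minimal sketch of the macro-based computation being asked
for here (the macro names are illustrative, not necessarily what the
reworked patch will use; the assignment replaces the pad_len line quoted
above):

/* The ESP trailer is 2 bytes (pad length + next header) and the AES-GCM
 * ICV is 16 bytes, so the pad-length byte sits 18 bytes from the end. */
#define ESP_TRAILER_SIZE	2
#define ESP_ICV_SIZE		16
#define ESP_TRAILER_AND_ICV	(ESP_TRAILER_SIZE + ESP_ICV_SIZE)

		mdata->pad_len = *rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - ESP_TRAILER_AND_ICV) +
				ESP_TRAILER_AND_ICV;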
  
Radu Nicolau Oct. 19, 2017, 11:57 a.m. UTC | #7
> > >> <snip: ixgbe_crypto_update_mb() and the pad_len discussion quoted above>
> > Ok, can we at least have macros for all these constants?
> > Another question: you do use pkt_len() here - does it mean that multi-
> > segment packets are not supported by ixgbe-ipsec?
> > Konstantin
It does support multisegment, but the pad_len has to be set only for a
single-segment send; it will be ignored otherwise. I have updated the code
to set it for single-segment packets only.
Also, our test app does not support multisegment packets.
  
Ananyev, Konstantin Oct. 19, 2017, 12:16 p.m. UTC | #8
> > > >> <snip: ixgbe_crypto_update_mb() quoted above>
> > > > Could you explain what pad_len is supposed to contain?
> > > > Also, what is the magical constant '18'?
> > > I added an explanation in the code: we read the payload padding size
> > > that is stored at len-18 bytes and add 18 bytes, 2 for the ESP trailer
> > > and 16 for the ICV.
> > Ok, can we at least have macros for all these constants?
> > Another question: you do use pkt_len() here - does it mean that multi-
> > segment packets are not supported by ixgbe-ipsec?
> > Konstantin
> It does support multisegment, but the pad_len has to be set only for a
> single-segment send; it will be ignored otherwise. I have updated the code
> to set it for single-segment packets only.


Sorry, I didn't understand that.
If that function does support multiseg packets, then it has to go to the
last segment via m->next; if it doesn't, then it should return an error
in case of m->nb_segs != 1.
Right?

> Also, our test app does not support multisegment packets.


Ok, I suppose that means the multi-seg case wasn't tested :)
  
Ananyev, Konstantin Oct. 19, 2017, 12:29 p.m. UTC | #9
> > > > >> <snip>
> > > > >> +static int
> > > > >> +ixgbe_crypto_update_mb(void *device __rte_unused,
> > > > >> +		struct rte_security_session *session,
> > > > >> +		       struct rte_mbuf *m, void *params __rte_unused) {

Another sort of generic question - why not make the security_set_pkt_metadata
function accept a bulk of packets?
In that case one can minimize the cost of function calls, accessing session
data, etc.
Though I suppose that could wait till next patch series.
Konstantin


> > > > >> <snip: rest of quoted thread>
  
Radu Nicolau Oct. 19, 2017, 1:09 p.m. UTC | #10
On 10/19/2017 1:16 PM, Ananyev, Konstantin wrote:
>>>>>> <snip: ixgbe_crypto_update_mb() and the pad_len discussion quoted above>
>>> Ok, can we at least have macros for all these constants?
>>> Another question: you do use pkt_len() here - does it mean that multi-
>>> segment packets are not supported by ixgbe-ipsec?
>>> Konstantin
>> It does support multisegment, but the pad_len has to be set only for a
>> single-segment send; it will be ignored otherwise. I have updated the code
>> to set it for single-segment packets only.
> Sorry, I didn't understand that.
> If that function does support multiseg packets, then it has to go to the
> last segment via m->next; if it doesn't, then it should return an error
> in case of m->nb_segs != 1.
> Right?
No need to return an error, just don't try to read the padding and don't
set the pad_len in the metadata. My understanding of the datasheet is
that multisegment egress IPsec is supported only for TCP/UDP packets,
and the pad_len is ignored even if it's set. So I changed it to only
process the padding for m->nb_segs == 1.
>> Also, our test app does not support multisegment packets.
> Ok, I suppose that means the multi-seg case wasn't tested :)
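
A minimal sketch of the single-segment guard described above, reusing the
illustrative ESP_TRAILER_AND_ICV macro and the union type from the earlier
sketches (the exact form in the reworked patch may differ):

	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
		union ixgbe_crypto_tx_desc_md *mdata =
			(union ixgbe_crypto_tx_desc_md *)&m->udata64;
		mdata->enc = 1;
		mdata->sa_idx = ic_session->sa_index;
		/* Read the ESP pad length from the packet tail only for
		 * single-segment packets; the hardware ignores it for
		 * multisegment TCP/UDP sends. */
		if (m->nb_segs == 1)
			mdata->pad_len = *rte_pktmbuf_mtod_offset(m,
				uint8_t *,
				rte_pktmbuf_pkt_len(m) -
					ESP_TRAILER_AND_ICV) +
				ESP_TRAILER_AND_ICV;
	}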
  
Radu Nicolau Oct. 19, 2017, 1:14 p.m. UTC | #11
On 10/19/2017 1:29 PM, Ananyev, Konstantin wrote:
>>>>>>> <snip>
>>>>>>> +
>>>>>>> +static int
>>>>>>> +ixgbe_crypto_update_mb(void *device __rte_unused,
>>>>>>> +		struct rte_security_session *session,
>>>>>>> +		       struct rte_mbuf *m, void *params __rte_unused) {
>
>
> Another sort of generic question - why not make the security_set_pkt_metadata
> function accept a bulk of packets?
> In that case one can minimize the cost of function calls, accessing session
> data, etc.
> Though I suppose that could wait till next patch series.
> Konstantin
It is a good suggestion, but we need to discuss it further; for example,
if it can accept a bulk of packets, will it also need a bulk of metadata
pointers, or just one for all the packets?
> <snip: rest of quoted thread>
  
Ananyev, Konstantin Oct. 19, 2017, 1:22 p.m. UTC | #12
> >>>>>>> <snip>
> >>>>>>> +static int
> >>>>>>> +ixgbe_crypto_update_mb(void *device __rte_unused,
> >>>>>>> +		struct rte_security_session *session,
> >>>>>>> +		       struct rte_mbuf *m, void *params __rte_unused) {
> > Another sort of generic question - why not make the
> > security_set_pkt_metadata function accept a bulk of packets?
> > In that case one can minimize the cost of function calls, accessing
> > session data, etc.
> > Though I suppose that could wait till next patch series.
> > Konstantin
> It is a good suggestion, but we need to discuss it further;

Yes, as I said that's for future.

> for example,
> if it can accept a bulk of packets, will it also need a bulk of metadata
> pointers, or just one for all the packets?

By metadata do you mean a session or ...?
Konstantin

> > <snip: rest of quoted thread>
  
Radu Nicolau Oct. 19, 2017, 2:19 p.m. UTC | #13
> > >>>>>>> <snip>
> > >>>>>>> +static int
> > >>>>>>> +ixgbe_crypto_update_mb(void *device __rte_unused,
> > >>>>>>> +		struct rte_security_session *session,
> > >>>>>>> +		       struct rte_mbuf *m, void *params __rte_unused) {
> > > Another sort of generic question - why not make the
> > > security_set_pkt_metadata function accept a bulk of packets?
> > > In that case one can minimize the cost of function calls, accessing
> > > session data, etc.
> > > Though I suppose that could wait till next patch series.
> > > Konstantin
> > It is a good suggestion, but we need to discuss it further;
> Yes, as I said that's for future.
> > for example,
> > if it can accept a bulk of packets, will it also need a bulk of
> > metadata pointers, or just one for all the packets?
> By metadata do you mean a session or ...?
> Konstantin

No, I mean the void *params parameter (it was named metadata in earlier patches).
> <snip: rest of quoted thread>
  
Ananyev, Konstantin Oct. 19, 2017, 2:36 p.m. UTC | #14
> > > for example,
> > > if it can accept a bulk of packets, will it also need a bulk of
> > > metadata pointers, or just one for all the packets?
> > By metadata do you mean a session or ...?
> > Konstantin
> No, I mean the void *params parameter (it was named metadata in earlier
> patches).

As right now it is not used, and I don't really know how you guys foresee
using it in the future - I don't have any strong opinion on it :)
Konstantin
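
For reference, a hypothetical sketch of the bulk variant being discussed
(rte_security has no such API at this point; the function name, the
per-packet params array, and the ops->set_pkt_metadata callback field are
all assumptions for illustration, not an agreed design):

static inline int
rte_security_set_pkt_metadata_bulk(struct rte_security_ctx *instance,
		struct rte_security_session *sess,
		struct rte_mbuf **m, uint16_t nb_pkts, void **params)
{
	uint16_t i;
	int ret;

	/* One call amortizes the per-packet function call and session
	 * lookup cost; params may be NULL if no per-packet metadata
	 * is needed, or carry one pointer per packet otherwise. */
	for (i = 0; i < nb_pkts; i++) {
		ret = instance->ops->set_pkt_metadata(instance->device,
				sess, m[i],
				params != NULL ? params[i] : NULL);
		if (ret != 0)
			return ret;
	}
	return 0;
}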
  

Patch

diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 5d2ad2f..339ff36 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -68,7 +68,7 @@  DEPDIRS-fm10k = $(core-libs) librte_hash
 DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
 DEPDIRS-i40e = $(core-libs) librte_hash
 DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
-DEPDIRS-ixgbe = $(core-libs) librte_hash
+DEPDIRS-ixgbe = $(core-libs) librte_hash librte_security
 DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
 DEPDIRS-liquidio = $(core-libs)
 DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 95c806d..6e963c7 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -118,11 +118,11 @@  SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
 else
 SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
 endif
-
 ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
 
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 4aab278..b132a0f 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -161,4 +161,12 @@  static inline uint32_t ixgbe_read_addr(volatile void* addr)
 #define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
 	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
 
+#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms)	\
+{									\
+	uint32_t cnt = poll_ms;						\
+	IXGBE_WRITE_REG(hw, (reg), (val));				\
+	while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--))	\
+		rte_delay_ms(1);					\
+}
+
 #endif /* _IXGBE_OS_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 14b9c53..fcabd5e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -61,6 +61,7 @@ 
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#include <rte_security_driver.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -1132,6 +1133,7 @@  eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
 	struct ixgbe_bw_conf *bw_conf =
 		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
+	struct rte_security_ctx *security_instance;
 	uint32_t ctrl_ext;
 	uint16_t csum;
 	int diag, i;
@@ -1139,6 +1141,17 @@  eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
+	security_instance = rte_malloc("rte_security_instances_ops",
+				sizeof(struct rte_security_ctx), 0);
+	if (security_instance == NULL)
+		return -ENOMEM;
+	security_instance->state = RTE_SECURITY_INSTANCE_VALID;
+	security_instance->device = (void *)eth_dev;
+	security_instance->ops = &ixgbe_security_ops;
+	security_instance->sess_cnt = 0;
+
+	eth_dev->data->security_ctx = security_instance;
+
 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
 	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
 	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
@@ -1169,6 +1182,7 @@  eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_SECURITY;
 
 	/* Vendor and Device ID need to be set before init of shared code */
 	hw->device_id = pci_dev->id.device_id;
@@ -1401,6 +1415,8 @@  eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	/* Remove all Traffic Manager configuration */
 	ixgbe_tm_conf_uninit(eth_dev);
 
+	rte_free(eth_dev->data->security_ctx);
+
 	return 0;
 }
 
@@ -3695,6 +3711,9 @@  ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    hw->mac.type == ixgbe_mac_X550EM_a)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index e28c856..f5b52c4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -38,6 +38,7 @@ 
 #include "base/ixgbe_dcb_82599.h"
 #include "base/ixgbe_dcb_82598.h"
 #include "ixgbe_bypass.h"
+#include "ixgbe_ipsec.h"
 #include <rte_time.h>
 #include <rte_hash.h>
 #include <rte_pci.h>
@@ -486,7 +487,7 @@  struct ixgbe_adapter {
 	struct ixgbe_filter_info    filter;
 	struct ixgbe_l2_tn_info     l2_tn;
 	struct ixgbe_bw_conf        bw_conf;
-
+	struct ixgbe_ipsec          ipsec;
 	bool rx_bulk_alloc_allowed;
 	bool rx_vec_allowed;
 	struct rte_timecounter      systime_tc;
@@ -543,6 +544,9 @@  struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->tm_conf)
 
+#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
+	(&((struct ixgbe_adapter *)adapter)->ipsec)
+
 /*
  * RX/TX function prototypes
  */
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 904c146..13c8243 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -187,6 +187,9 @@  const struct rte_flow_action *next_no_void_action(
  * END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
  */
 static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -226,6 +229,41 @@  cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/**
+	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		const void *conf = act->conf;
+		/* check if the next not void item is END */
+		act = next_no_void_action(actions, act);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			return -rte_errno;
+		}
+
+		/* get the IP pattern*/
+		item = next_no_void_pattern(pattern, NULL);
+		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			if (item->last ||
+					item->type == RTE_FLOW_ITEM_TYPE_END) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "IP pattern missing.");
+				return -rte_errno;
+			}
+			item = next_no_void_pattern(pattern, item);
+		}
+
+		filter->proto = IPPROTO_ESP;
+		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	}
+
 	/* the first not void item can be MAC or IPv4 */
 	item = next_no_void_pattern(pattern, NULL);
 
@@ -519,6 +557,10 @@  ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
+	/* ESP flow not really a flow*/
+	if (filter->proto == IPPROTO_ESP)
+		return 0;
+
 	/* Ixgbe doesn't support tcp flags. */
 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -2758,6 +2800,11 @@  ixgbe_flow_create(struct rte_eth_dev *dev,
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
 			actions, &ntuple_filter, error);
+
+	/* ESP flow not really a flow*/
+	if (ntuple_filter.proto == IPPROTO_ESP)
+		return flow;
+
 	if (!ret) {
 		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
 		if (!ret) {
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 0000000..6ace305
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,744 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_ip.h>
+#include <rte_jhash.h>
+#include <rte_security_driver.h>
+#include <rte_cryptodev.h>
+#include <rte_flow.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_ipsec.h"
+
+#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS  5
+
+#define IXGBE_WAIT_RREAD \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_RWRITE \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TREAD \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TWRITE \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+
+#define CMP_IP(a, b) (\
+	(a).ipv6[0] == (b).ipv6[0] && \
+	(a).ipv6[1] == (b).ipv6[1] && \
+	(a).ipv6[2] == (b).ipv6[2] && \
+	(a).ipv6[3] == (b).ipv6[3])
+
+
+static void
+ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int i = 0;
+
+	/* clear Rx IP table*/
+	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+		uint16_t index = i << 3;
+		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+		IXGBE_WAIT_RWRITE;
+	}
+
+	/* clear Rx SPI and Rx/Tx SA tables*/
+	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+		uint32_t index = i << 3;
+		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+		IXGBE_WAIT_TWRITE;
+	}
+}
+
+static int
+ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
+{
+	struct rte_eth_dev *dev = ic_session->dev;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
+			dev->data->dev_private);
+	uint32_t reg_val;
+	int sa_index = -1;
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		int i, ip_index = -1;
+
+		/* Find a match in the IP table*/
+		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+			if (CMP_IP(priv->rx_ip_tbl[i].ip,
+				   ic_session->dst_ip)) {
+				ip_index = i;
+				break;
+			}
+		}
+		/* If no match, find a free entry in the IP table*/
+		if (ip_index < 0) {
+			for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+				if (priv->rx_ip_tbl[i].ref_count == 0) {
+					ip_index = i;
+					break;
+				}
+			}
+		}
+
+		/* Fail if no match and no free entries*/
+		if (ip_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Rx IP table\n");
+			return -1;
+		}
+
+		/* Find a free entry in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->rx_sa_tbl[i].used == 0) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no free entries*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Rx SA table\n");
+			return -1;
+		}
+
+		priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+				ic_session->dst_ip.ipv6[0];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+				ic_session->dst_ip.ipv6[1];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+				ic_session->dst_ip.ipv6[2];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+				ic_session->dst_ip.ipv6[3];
+		priv->rx_ip_tbl[ip_index].ref_count++;
+
+		priv->rx_sa_tbl[sa_index].spi =
+			rte_cpu_to_be_32(ic_session->spi);
+		priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+		priv->rx_sa_tbl[sa_index].key[3] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
+		priv->rx_sa_tbl[sa_index].key[2] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
+		priv->rx_sa_tbl[sa_index].key[1] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
+		priv->rx_sa_tbl[sa_index].key[0] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
+		priv->rx_sa_tbl[sa_index].salt =
+			rte_cpu_to_be_32(ic_session->salt);
+		priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+		if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
+			priv->rx_sa_tbl[sa_index].mode |=
+					(IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+		if (ic_session->dst_ip.type == IPv6)
+			priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+		priv->rx_sa_tbl[sa_index].used = 1;
+
+		/* write IP table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_IP | (ip_index << 3);
+		if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+					priv->rx_ip_tbl[ip_index].ip.ipv4);
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
+		}
+		IXGBE_WAIT_RWRITE;
+
+		/* write SPI table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_SPI | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+				priv->rx_sa_tbl[sa_index].spi);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
+				priv->rx_sa_tbl[sa_index].ip_index);
+		IXGBE_WAIT_RWRITE;
+
+		/* write Key table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_KEY | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
+				priv->rx_sa_tbl[sa_index].key[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
+				priv->rx_sa_tbl[sa_index].key[1]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
+				priv->rx_sa_tbl[sa_index].key[2]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
+				priv->rx_sa_tbl[sa_index].key[3]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
+				priv->rx_sa_tbl[sa_index].salt);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
+				priv->rx_sa_tbl[sa_index].mode);
+		IXGBE_WAIT_RWRITE;
+
+	} else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
+		int i;
+
+		/* Find a free entry in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->tx_sa_tbl[i].used == 0) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no free entries*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Tx SA table\n");
+			return -1;
+		}
+
+		priv->tx_sa_tbl[sa_index].spi =
+			rte_cpu_to_be_32(ic_session->spi);
+		priv->tx_sa_tbl[sa_index].key[3] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
+		priv->tx_sa_tbl[sa_index].key[2] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
+		priv->tx_sa_tbl[sa_index].key[1] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
+		priv->tx_sa_tbl[sa_index].key[0] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
+		priv->tx_sa_tbl[sa_index].salt =
+			rte_cpu_to_be_32(ic_session->salt);
+
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
+				priv->tx_sa_tbl[sa_index].key[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
+				priv->tx_sa_tbl[sa_index].key[1]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
+				priv->tx_sa_tbl[sa_index].key[2]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
+				priv->tx_sa_tbl[sa_index].key[3]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
+				priv->tx_sa_tbl[sa_index].salt);
+		IXGBE_WAIT_TWRITE;
+
+		priv->tx_sa_tbl[i].used = 1;
+		ic_session->sa_index = sa_index;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+		       struct ixgbe_crypto_session *ic_session)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_ipsec *priv =
+			IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
+	uint32_t reg_val;
+	int sa_index = -1;
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		int i, ip_index = -1;
+
+		/* Find a match in the IP table*/
+		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+			if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
+				ip_index = i;
+				break;
+			}
+		}
+
+		/* Fail if no match*/
+		if (ip_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Rx IP table\n");
+			return -1;
+		}
+
+		/* Find a free entry in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->rx_sa_tbl[i].spi ==
+				  rte_cpu_to_be_32(ic_session->spi)) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no match*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Rx SA table\n");
+			return -1;
+		}
+
+		/* Disable and clear Rx SPI and key table entries */
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+		IXGBE_WAIT_RWRITE;
+		priv->rx_sa_tbl[sa_index].used = 0;
+
+		/* If last used then clear the IP table entry*/
+		priv->rx_ip_tbl[ip_index].ref_count--;
+		if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+			reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
+					(ip_index << 3);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+		}
+	} else { /* session->dir == RTE_CRYPTO_OUTBOUND */
+		int i;
+
+		/* Find a match in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->tx_sa_tbl[i].spi ==
+				    rte_cpu_to_be_32(ic_session->spi)) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no match entries*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Tx SA table\n");
+			return -1;
+		}
+		reg_val = IPSRXIDX_WRITE | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+		IXGBE_WAIT_TWRITE;
+
+		priv->tx_sa_tbl[sa_index].used = 0;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_create_session(void *device,
+		struct rte_security_session_conf *conf,
+		struct rte_security_session *session,
+		struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct ixgbe_crypto_session *ic_session = NULL;
+	struct rte_crypto_aead_xform *aead_xform;
+	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+	if (rte_mempool_get(mempool, (void **)&ic_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
+		return -ENOMEM;
+	}
+
+	if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
+			conf->crypto_xform->aead.algo !=
+					RTE_CRYPTO_AEAD_AES_GCM) {
+		PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+		return -ENOTSUP;
+	}
+	aead_xform = &conf->crypto_xform->aead;
+
+	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		if (dev_conf->rxmode.enable_sec) {
+			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
+		} else {
+			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+			return -ENOTSUP;
+		}
+	} else {
+		if (dev_conf->txmode.enable_sec) {
+			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
+		} else {
+			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+			return -ENOTSUP;
+		}
+	}
+
+	ic_session->key = aead_xform->key.data;
+	memcpy(&ic_session->salt,
+	       &aead_xform->key.data[aead_xform->key.length], 4);
+	ic_session->spi = conf->ipsec.spi;
+	ic_session->dev = eth_dev;
+
+	set_sec_session_private_data(session, ic_session);
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+		if (ixgbe_crypto_add_sa(ic_session)) {
+			PMD_DRV_LOG(ERR, "Failed to add SA\n");
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_remove_session(void *device,
+		struct rte_security_session *session)
+{
+	struct rte_eth_dev *eth_dev = device;
+	struct ixgbe_crypto_session *ic_session =
+		(struct ixgbe_crypto_session *)
+		get_sec_session_private_data(session);
+	struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
+
+	if (eth_dev != ic_session->dev) {
+		PMD_DRV_LOG(ERR, "Session not bound to this device\n");
+		return -ENODEV;
+	}
+
+	if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
+		PMD_DRV_LOG(ERR, "Failed to remove session\n");
+		return -EFAULT;
+	}
+
+	rte_mempool_put(mempool, (void *)ic_session);
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_update_mb(void *device __rte_unused,
+		struct rte_security_session *session,
+		       struct rte_mbuf *m, void *params __rte_unused)
+{
+	struct ixgbe_crypto_session *ic_session =
+			get_sec_session_private_data(session);
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+		struct ixgbe_crypto_tx_desc_md *mdata =
+			(struct ixgbe_crypto_tx_desc_md *)&m->udata64;
+		mdata->enc = 1;
+		mdata->sa_idx = ic_session->sa_index;
+		mdata->pad_len = *rte_pktmbuf_mtod_offset(m,
+			uint8_t *, rte_pktmbuf_pkt_len(m) - 18) + 18;
+	}
+	return 0;
+}
+
+struct rte_cryptodev_capabilities aes_gmac_crypto_capabilities[] = {
+	{	/* AES GMAC (128-bit) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+		}, }
+	},
+};
+
+struct rte_cryptodev_capabilities aes_gcm_gmac_crypto_capabilities[] = {
+	{	/* AES GMAC (128-bit) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES GCM (128-bit) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+		}, }
+	},
+};
+
+static const struct rte_security_capability ixgbe_security_capabilities[] = {
+	{ /* IPsec Inline Crypto ESP Transport Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
+	{ /* IPsec Inline Crypto ESP Transport Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+		.ol_flags = 0
+	},
+	{ /* IPsec Inline Crypto ESP Tunnel Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
+	{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+		.ol_flags = 0
+	},
+	{
+		.action = RTE_SECURITY_ACTION_TYPE_NONE
+	}
+};
+
+static const struct rte_security_capability *
+ixgbe_crypto_capabilities_get(void *device __rte_unused)
+{
+	return ixgbe_security_capabilities;
+}
+
+
+int
+ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg;
+
+	/* sanity checks */
+	if (dev->data->dev_conf.rxmode.enable_lro) {
+		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+		return -1;
+	}
+	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+		return -1;
+	}
+
+	/* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet */
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
+
+	/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+	 * hang will occur with heavy traffic.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg = (reg & 0xFFFFFFF0) | 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+	reg  = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+	if (dev->data->dev_conf.rxmode.enable_sec) {
+		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+		if (reg != 0) {
+			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+			return -1;
+		}
+	}
+	if (dev->data->dev_conf.txmode.enable_sec) {
+		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
+				IXGBE_SECTXCTRL_STORE_FORWARD);
+		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+		if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
+			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+			return -1;
+		}
+	}
+
+	ixgbe_crypto_clear_ipsec_tables(dev);
+
+	return 0;
+}
+
+int
+ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+				      const void *ip_spec,
+				      uint8_t is_ipv6)
+{
+	struct ixgbe_crypto_session *ic_session
+		= get_sec_session_private_data(sess);
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		if (is_ipv6) {
+			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+
+			ic_session->src_ip.type = IPv6;
+			ic_session->dst_ip.type = IPv6;
+			rte_memcpy(ic_session->src_ip.ipv6,
+				   ipv6->hdr.src_addr, 16);
+			rte_memcpy(ic_session->dst_ip.ipv6,
+				   ipv6->hdr.dst_addr, 16);
+		} else {
+			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+
+			ic_session->src_ip.type = IPv4;
+			ic_session->dst_ip.type = IPv4;
+			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+		}
+		return ixgbe_crypto_add_sa(ic_session);
+	}
+
+	return 0;
+}
+
+
+struct rte_security_ops ixgbe_security_ops = {
+	.session_create = ixgbe_crypto_create_session,
+	.session_update = NULL,
+	.session_stats_get = NULL,
+	.session_destroy = ixgbe_crypto_remove_session,
+
+	.set_pkt_metadata = ixgbe_crypto_update_mb,
+
+	.capabilities_get = ixgbe_crypto_capabilities_get
+};
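+
For reviewers, a minimal sketch of how an application is expected to drive
the ops above through the rte_security API. This is not part of the patch;
port_id, sess_pool and the key buffer are illustrative. Note that the
create path expects the 4-byte salt to follow the 16-byte AES-GCM key in
the same buffer:

	uint8_t key_and_salt[20];	/* 16-byte AES-GCM key + 4-byte salt */
	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
			rte_eth_dev_get_sec_ctx(port_id);
	struct rte_crypto_sym_xform aead_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key_and_salt, .length = 16 },
			.iv = { .offset = 0, .length = 12 },
			.digest_length = 16,
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		},
		.crypto_xform = &aead_xform,
	};
	struct rte_security_session *sess =
			rte_security_session_create(ctx, &conf, sess_pool);

On Tx the application then sets PKT_TX_SEC_OFFLOAD in ol_flags and calls
rte_security_set_pkt_metadata(), which resolves to ixgbe_crypto_update_mb()
above.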
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.h b/drivers/net/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 0000000..9f06235
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,147 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef IXGBE_IPSEC_H_
+#define IXGBE_IPSEC_H_
+
+#include <rte_security.h>
+
+#define IPSRXIDX_RX_EN                                    0x00000001
+#define IPSRXIDX_TABLE_IP                                 0x00000002
+#define IPSRXIDX_TABLE_SPI                                0x00000004
+#define IPSRXIDX_TABLE_KEY                                0x00000006
+#define IPSRXIDX_WRITE                                    0x80000000
+#define IPSRXIDX_READ                                     0x40000000
+#define IPSRXMOD_VALID                                    0x00000001
+#define IPSRXMOD_PROTO                                    0x00000004
+#define IPSRXMOD_DECRYPT                                  0x00000008
+#define IPSRXMOD_IPV6                                     0x00000010
+#define IXGBE_ADVTXD_POPTS_IPSEC                          0x00000400
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP                 0x00002000
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN               0x00004000
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP                    0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK                 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL         0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH           0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED    0x18000000
+
+#define IPSEC_MAX_RX_IP_COUNT           128
+#define IPSEC_MAX_SA_COUNT              1024
+
+enum ixgbe_operation {
+	IXGBE_OP_AUTHENTICATED_ENCRYPTION,
+	IXGBE_OP_AUTHENTICATED_DECRYPTION
+};
+
+enum ixgbe_gcm_key {
+	IXGBE_GCM_KEY_128,
+	IXGBE_GCM_KEY_256
+};
+
+/**
+ * Generic IP address structure
+ * TODO: Find a better location for this; rte_net.h possibly.
+ */
+struct ipaddr {
+	enum ipaddr_type {
+		IPv4,
+		IPv6
+	} type;
+	/**< IP Address Type - IPv4/IPv6 */
+
+	union {
+		uint32_t ipv4;
+		uint32_t ipv6[4];
+	};
+};
+
+/** inline crypto private session structure */
+struct ixgbe_crypto_session {
+	enum ixgbe_operation op;
+	uint8_t *key;
+	uint32_t salt;
+	uint32_t sa_index;
+	uint32_t spi;
+	struct ipaddr src_ip;
+	struct ipaddr dst_ip;
+	struct rte_eth_dev *dev;
+} __rte_cache_aligned;
+
+struct ixgbe_crypto_rx_ip_table {
+	struct ipaddr ip;
+	uint16_t ref_count;
+};
+
+struct ixgbe_crypto_rx_sa_table {
+	uint32_t spi;
+	uint32_t ip_index;
+	uint32_t key[4];
+	uint32_t salt;
+	uint8_t  mode;
+	uint8_t  used;
+};
+
+struct ixgbe_crypto_tx_sa_table {
+	uint32_t spi;
+	uint32_t key[4];
+	uint32_t salt;
+	uint8_t  used;
+};
+
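+/* Tx metadata: overlaid on rte_mbuf.udata64 by the set_pkt_metadata()
+ * security op and consumed when the Tx path builds the advanced context
+ * descriptor.
+ */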
+struct ixgbe_crypto_tx_desc_md {
+	union {
+		uint64_t data;
+		struct {
+			uint32_t sa_idx;
+			uint8_t  pad_len;
+			uint8_t  enc;
+		};
+	};
+};
+
+struct ixgbe_ipsec {
+	struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
+	struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
+	struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
+};
+
+extern struct rte_security_ops ixgbe_security_ops;
+
+int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+					  const void *ip_spec,
+					  uint8_t is_ipv6);
+
+#endif /* IXGBE_IPSEC_H_ */
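 
Ingress SAs, by contrast, are plumbed in through rte_flow:
ixgbe_crypto_add_ingress_sa_from_flow() is reached when a flow carrying the
security action is created. A hedged sketch, where sess is an ingress
session created as above and the addresses and SPI are illustrative:

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xc0a80001), /* 192.168.0.1 */
			.dst_addr = rte_cpu_to_be_32(0xc0a80002), /* 192.168.0.2 */
		},
	};
	struct rte_flow_item_esp esp_spec = {
		.hdr = { .spi = rte_cpu_to_be_32(0x1000) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &rte_flow_item_ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP,
		  .spec = &esp_spec, .mask = &rte_flow_item_esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr,
						pattern, actions, &err);

The driver lifts the IP addresses out of the flow spec into the session
before programming the SA, as shown in
ixgbe_crypto_add_ingress_sa_from_flow() above.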
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 0038dfb..279e3fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -93,6 +93,7 @@ 
 		PKT_TX_TCP_SEG |		 \
 		PKT_TX_MACSEC |			 \
 		PKT_TX_OUTER_IP_CKSUM |		 \
+		PKT_TX_SEC_OFFLOAD |	 \
 		IXGBE_TX_IEEE1588_TMST)
 
 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -395,7 +396,8 @@  ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 static inline void
 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+		struct rte_mbuf *mb)
 {
 	uint32_t type_tucmd_mlhl;
 	uint32_t mss_l4len_idx = 0;
@@ -479,6 +481,18 @@  ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		seqnum_seed |= tx_offload.l2_len
 			       << IXGBE_ADVTXD_TUNNEL_LEN;
 	}
+	if (mb->ol_flags & PKT_TX_SEC_OFFLOAD) {
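+		/* Carry the Tx SA index, the ESP type/encrypt-enable bits
+		 * and the ESP trailer length from the mbuf metadata into
+		 * the context descriptor.
+		 */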
+		struct ixgbe_crypto_tx_desc_md *mdata =
+				(struct ixgbe_crypto_tx_desc_md *)
+				&mb->udata64;
+		seqnum_seed |=
+			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
+		type_tucmd_mlhl |= mdata->enc ?
+				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+				IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+		type_tucmd_mlhl |=
+			(mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+	}
 
 	txq->ctx_cache[ctx_idx].flags = ol_flags;
 	txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
@@ -657,6 +671,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
 	union ixgbe_tx_offload tx_offload;
+	uint8_t use_ipsec;
 
 	tx_offload.data[0] = 0;
 	tx_offload.data[1] = 0;
@@ -684,6 +699,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
+		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/* If hardware offload required */
 		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
@@ -695,6 +711,13 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
 			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
 			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+			if (use_ipsec) {
+				struct ixgbe_crypto_tx_desc_md *ipsec_mdata =
+					(struct ixgbe_crypto_tx_desc_md *)
+							&tx_pkt->udata64;
+				tx_offload.sa_idx = ipsec_mdata->sa_idx;
+				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+			}
 
 			/* If new context need be built or reuse the exist ctx. */
 			ctx = what_advctx_update(txq, tx_ol_req,
@@ -855,7 +878,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				}
 
 				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-					tx_offload);
+					tx_offload, tx_pkt);
 
 				txe->last_id = tx_last;
 				tx_id = txe->next_id;
@@ -873,6 +896,8 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		if (use_ipsec)
+			olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
 
 		m_seg = tx_pkt;
 		do {
@@ -1447,6 +1472,12 @@  rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
 	}
 
+	if (rx_status & IXGBE_RXD_STAT_SECP) {
+		pkt_flags |= PKT_RX_SEC_OFFLOAD;
+		if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
+			pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+	}
+
 	return pkt_flags;
 }
 
@@ -2364,8 +2395,9 @@  void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
-			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+			(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) &&
+			!(dev->data->dev_conf.txmode.enable_sec)) {
 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
 		dev->tx_pkt_prepare = NULL;
 #ifdef RTE_IXGBE_INC_VECTOR
@@ -2535,6 +2567,7 @@  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->txq_flags = tx_conf->txq_flags;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+	txq->using_ipsec = dev->data->dev_conf.txmode.enable_sec;
 
 	/*
 	 * Modification to set VFTDT for virtual function if vf is detected
@@ -4519,6 +4552,7 @@  ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 		rxq->rx_using_sse = rx_using_sse;
+		rxq->using_ipsec = dev->data->dev_conf.rxmode.enable_sec;
 	}
 }
 
@@ -5006,6 +5040,17 @@  ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
 		ixgbe_setup_loopback_link_82599(hw);
 
+	if (dev->data->dev_conf.rxmode.enable_sec ||
+			dev->data->dev_conf.txmode.enable_sec) {
+		ret = ixgbe_crypto_enable_ipsec(dev);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR,
+				    "ixgbe_crypto_enable_ipsec fails with %d.",
+				    ret);
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 81c527f..4017831 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -138,8 +138,10 @@  struct ixgbe_rx_queue {
 	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
 	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
 	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-	uint16_t            rx_using_sse;
+	uint8_t            rx_using_sse;
 	/**< indicates that vector RX is in use */
+	uint8_t            using_ipsec;
+	/**< indicates that IPsec RX feature is in use */
 #ifdef RTE_IXGBE_INC_VECTOR
 	uint16_t            rxrearm_nb;     /**< number of remaining to be re-armed */
 	uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
@@ -183,6 +185,10 @@  union ixgbe_tx_offload {
 		/* fields for TX offloading of tunnels */
 		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
 		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
+
+		/* inline IPsec related */
+		uint64_t sa_idx:8;	/**< TX SA database entry index */
+		uint64_t sec_pad_len:4;	/**< padding length */
 	};
 };
 
@@ -247,6 +253,9 @@  struct ixgbe_tx_queue {
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
 	const struct ixgbe_txq_ops *ops;       /**< txq ops */
 	uint8_t             tx_deferred_start; /**< not in global dev start. */
+	uint8_t		    using_ipsec;
+	/**< indicates that IPsec TX feature is in use */
 };
 
 struct ixgbe_txq_ops {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index e704a7f..c9b1e2e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -124,10 +124,12 @@  ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 
 static inline void
 desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
-	struct rte_mbuf **rx_pkts)
+	struct rte_mbuf **rx_pkts, uint8_t use_ipsec)
 {
 	__m128i ptype0, ptype1, vtag0, vtag1, csum;
 	__m128i rearm0, rearm1, rearm2, rearm3;
+	__m128i sterr0, sterr1, sterr2, sterr3;
+	__m128i tmp1, tmp2, tmp3, tmp4;
 
 	/* mask everything except rss type */
 	const __m128i rsstype_msk = _mm_set_epi16(
@@ -174,6 +176,41 @@  desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
 		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
 		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
 
+	const __m128i ipsec_sterr_msk = _mm_set_epi32(
+		0, IXGBE_RXDADV_IPSEC_STATUS_SECP |
+			IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED,
+		0, 0);
+	const __m128i ipsec_proc_msk  = _mm_set_epi32(
+		0, IXGBE_RXDADV_IPSEC_STATUS_SECP, 0, 0);
+	const __m128i ipsec_err_flag  = _mm_set_epi32(
+		0, PKT_RX_SEC_OFFLOAD_FAILED | PKT_RX_SEC_OFFLOAD,
+		0, 0);
+	const __m128i ipsec_proc_flag = _mm_set_epi32(
+		0, PKT_RX_SEC_OFFLOAD, 0, 0);
+
+	if (use_ipsec) {
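+		/* Per descriptor: a full match on SECP plus the
+		 * authentication-failed code marks the packet as failed,
+		 * while a match on SECP alone marks successful inline
+		 * processing.
+		 */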
+		sterr0 = _mm_and_si128(descs[0], ipsec_sterr_msk);
+		sterr1 = _mm_and_si128(descs[1], ipsec_sterr_msk);
+		sterr2 = _mm_and_si128(descs[2], ipsec_sterr_msk);
+		sterr3 = _mm_and_si128(descs[3], ipsec_sterr_msk);
+		tmp1 = _mm_cmpeq_epi32(sterr0, ipsec_sterr_msk);
+		tmp2 = _mm_cmpeq_epi32(sterr0, ipsec_proc_msk);
+		tmp3 = _mm_cmpeq_epi32(sterr1, ipsec_sterr_msk);
+		tmp4 = _mm_cmpeq_epi32(sterr1, ipsec_proc_msk);
+		sterr0 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
+					_mm_and_si128(tmp2, ipsec_proc_flag));
+		sterr1 = _mm_or_si128(_mm_and_si128(tmp3, ipsec_err_flag),
+					_mm_and_si128(tmp4, ipsec_proc_flag));
+		tmp1 = _mm_cmpeq_epi32(sterr2, ipsec_sterr_msk);
+		tmp2 = _mm_cmpeq_epi32(sterr2, ipsec_proc_msk);
+		tmp3 = _mm_cmpeq_epi32(sterr3, ipsec_sterr_msk);
+		tmp4 = _mm_cmpeq_epi32(sterr3, ipsec_proc_msk);
+		sterr2 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
+					_mm_and_si128(tmp2, ipsec_proc_flag));
+		sterr3 = _mm_or_si128(_mm_and_si128(tmp3, ipsec_err_flag),
+					_mm_and_si128(tmp4, ipsec_proc_flag));
+	}
+
 	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
 	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
 	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
@@ -221,6 +258,13 @@  desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
 	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
 	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
 
+	if (use_ipsec) {
+		rearm0 = _mm_or_si128(rearm0, sterr0);
+		rearm1 = _mm_or_si128(rearm1, sterr1);
+		rearm2 = _mm_or_si128(rearm2, sterr2);
+		rearm3 = _mm_or_si128(rearm3, sterr3);
+	}
+
 	/* write the rearm data and the olflags in one write */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
 			offsetof(struct rte_mbuf, rearm_data) + 8);
@@ -310,6 +354,7 @@  _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	volatile union ixgbe_adv_rx_desc *rxdp;
 	struct ixgbe_rx_entry *sw_ring;
 	uint16_t nb_pkts_recd;
+	uint8_t use_ipsec = rxq->using_ipsec;
 	int pos;
 	uint64_t var;
 	__m128i shuf_msk;
@@ -471,7 +516,8 @@  _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
 
 		/* set ol_flags with vlan packet type */
-		desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
+		desc_to_olflags_v(descs, mbuf_init, vlan_flags,
+				  &rx_pkts[pos], use_ipsec);
 
 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
 		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);