@@ -208,8 +208,6 @@ static struct rte_eth_conf port_conf = {
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
- .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS),
},
};
@@ -315,7 +313,8 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
}
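+/* Prepend the Ethernet header and set the mbuf TX offload flags */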
static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
+ const struct lcore_conf *qconf)
{
struct ip *ip;
struct ether_hdr *ethhdr;
@@ -325,14 +324,19 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
pkt->l3_len = sizeof(struct ip);
pkt->l2_len = ETHER_HDR_LEN;
ip->ip_sum = 0;
+
+		/* compute IPv4 cksum in SW when HW offload is not enabled */
+ if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
+ ip->ip_sum = rte_ipv4_cksum((struct ipv4_hdr *)ip);
+
ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
} else {
- pkt->ol_flags |= PKT_TX_IPV6;
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
pkt->l3_len = sizeof(struct ip6_hdr);
pkt->l2_len = ETHER_HDR_LEN;
@@ -346,18 +350,19 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
}
static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
+ const struct lcore_conf *qconf)
{
int32_t i;
const int32_t prefetch_offset = 2;
for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port);
+ prepare_tx_pkt(pkts[i], port, qconf);
}
	/* Process remaining packets */
for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port);
+ prepare_tx_pkt(pkts[i], port, qconf);
}
/* Send burst of packets on an output interface */
@@ -371,7 +376,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
queueid = qconf->tx_queue_id[port];
m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
- prepare_tx_burst(m_table, n, port);
+ prepare_tx_burst(m_table, n, port, qconf);
ret = rte_eth_tx_burst(port, queueid, m_table, n);
if (unlikely(ret < n)) {
@@ -1543,7 +1548,7 @@ cryptodevs_init(void)
}
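+/* Configure a port and its queues, enabling the requested RX/TX offloads */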
static void
-port_init(uint16_t portid)
+port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
@@ -1584,10 +1589,10 @@ port_init(uint16_t portid)
local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+	/* required capabilities were already verified in sa_check_offloads() */
+ local_port_conf.rxmode.offloads |= req_rx_offloads;
+ local_port_conf.txmode.offloads |= req_tx_offloads;
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
@@ -1639,6 +1644,13 @@ port_init(uint16_t portid)
qconf = &lcore_conf[lcore_id];
qconf->tx_queue_id[portid] = tx_queueid;
+
+	/* Pre-populate mbuf TX offload flags from the offloads enabled on the port */
+ qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
+ qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
+ if (req_tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
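+	/* IPv6 has no header checksum, so no equivalent flag exists */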
+
tx_queueid++;
/* init RX queues */
@@ -1749,6 +1761,7 @@ main(int32_t argc, char **argv)
uint32_t lcore_id;
uint8_t socket_id;
uint16_t portid;
+ uint64_t req_rx_offloads, req_tx_offloads;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1804,7 +1817,8 @@ main(int32_t argc, char **argv)
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- port_init(portid);
+		if (sa_check_offloads(portid, &req_rx_offloads,
+				&req_tx_offloads) != 0)
+			rte_exit(EXIT_FAILURE,
+				"Port %u cannot support the required offloads\n",
+				portid);
+		port_init(portid, req_rx_offloads, req_tx_offloads);
}
cryptodevs_init();
@@ -146,6 +146,8 @@ struct ipsec_ctx {
struct rte_mempool *session_pool;
struct rte_mbuf *ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
uint16_t ol_pkts_cnt;
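+	/* mbuf TX offload flags to apply to outbound IPv4/IPv6 packets */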
+ uint64_t ipv4_offloads;
+ uint64_t ipv6_offloads;
};
struct cdev_key {
@@ -239,4 +241,8 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id);
void
rt_init(struct socket_ctx *ctx, int32_t socket_id);
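+/*
+ * Compute the RX/TX HW offload flags required by the SA rules bound to
+ * @port_id; returns a negative errno if the port cannot support them.
+ */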
+int
+sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
+ uint64_t *tx_offloads);
+
#endif /* __IPSEC_H__ */
@@ -1017,3 +1017,59 @@ outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
for (i = 0; i < nb_pkts; i++)
sa[i] = &sa_ctx->sa[sa_idx[i]];
}
+
+/*
+ * Determine which HW offloads the configured SA rules require on a port
+ * and verify that the port can support them.
+ */
+int
+sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
+ uint64_t *tx_offloads)
+{
+ struct ipsec_sa *rule;
+ uint32_t idx_sa;
+ struct rte_eth_dev_info dev_info;
+
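+	/* query the offload capabilities reported by the device */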
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ *rx_offloads = 0;
+ *tx_offloads = 0;
+
+ /* Check for inbound rules that use offloads and use this port */
+ for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
+ rule = &sa_in[idx_sa];
+		if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		     rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
+		    rule->portid == port_id) {
+ if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+ == 0) {
+				RTE_LOG(WARNING, PORT,
+					"HW RX IPsec is not supported by port %u\n",
+					port_id);
+ return -EINVAL;
+ }
+ *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+ }
+ }
+
+ /* Check for outbound rules that use offloads and use this port */
+ for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
+ rule = &sa_out[idx_sa];
+		if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		     rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
+		    rule->portid == port_id) {
+ if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+ == 0) {
+				RTE_LOG(WARNING, PORT,
+					"HW TX IPsec is not supported by port %u\n",
+					port_id);
+ return -EINVAL;
+ }
+ *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+ /* Enable HW IPv4 cksum as well, if it is available */
+ if (dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_IPV4_CKSUM)
+ *tx_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ }
+ }
+ return 0;
+}