@@ -32,6 +32,7 @@
#include <rte_ring.h>
#include <rte_bus.h>
#include <rte_mbuf_pool_ops.h>
+#include <rte_mbuf_dyn.h>
#include <dpaa_of.h>
#include <rte_dpaa_bus.h>
@@ -55,6 +56,9 @@ unsigned int dpaa_svr_family;
RTE_DEFINE_PER_LCORE(struct dpaa_portal *, dpaa_io);
+#define DPAA_SEQN_DYNFIELD_NAME "dpaa_seqn_dynfield"
+int dpaa_seqn_dynfield_offset = -1;
+
struct fm_eth_port_cfg *
dpaa_get_eth_port_cfg(int dev_id)
{
@@ -251,6 +255,11 @@ dpaa_clean_device_list(void)
int rte_dpaa_portal_init(void *arg)
{
+ static const struct rte_mbuf_dynfield dpaa_seqn_dynfield_desc = {
+ .name = DPAA_SEQN_DYNFIELD_NAME,
+ .size = sizeof(dpaa_seqn_t),
+ .align = __alignof__(dpaa_seqn_t),
+ };
unsigned int cpu, lcore = rte_lcore_id();
int ret;
@@ -264,6 +273,13 @@ int rte_dpaa_portal_init(void *arg)
cpu = rte_lcore_to_cpu_id(lcore);
+ dpaa_seqn_dynfield_offset =
+ rte_mbuf_dynfield_register(&dpaa_seqn_dynfield_desc);
+ if (dpaa_seqn_dynfield_offset < 0) {
+ DPAA_BUS_LOG(ERR, "Failed to register mbuf field for dpaa sequence number\n");
+ return -rte_errno;
+ }
+
/* Initialise bman thread portals */
ret = bman_thread_init();
if (ret) {
@@ -7,6 +7,7 @@
#define __RTE_DPAA_BUS_H__
#include <rte_bus.h>
+#include <rte_mbuf_dyn.h>
#include <rte_mempool.h>
#include <dpaax_iova_table.h>
@@ -16,6 +17,33 @@
#include <fsl_bman.h>
#include <netcfg.h>
+/* This sequence number field is used to store the event entry index
+ * for driver-specific usage. For parallel mode queues, an invalid
+ * index is set, while for atomic mode queues a valid value
+ * ranging from 1 to 16 is used.
+ */
+#define DPAA_INVALID_MBUF_SEQN 0
+
+typedef uint32_t dpaa_seqn_t;
+extern int dpaa_seqn_dynfield_offset;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Read the DPAA sequence number from an mbuf.
+ *
+ * @param mbuf Mbuf structure to read from.
+ * @return Pointer to the DPAA sequence number within the mbuf.
+ */
+__rte_experimental
+static inline dpaa_seqn_t *
+dpaa_seqn(const struct rte_mbuf *mbuf)
+{
+ return RTE_MBUF_DYNFIELD(mbuf, dpaa_seqn_dynfield_offset,
+ dpaa_seqn_t *);
+}
+
#define DPAA_MEMPOOL_OPS_NAME "dpaa"
#define DEV_TO_DPAA_DEVICE(ptr) \
@@ -14,6 +14,7 @@ INTERNAL {
dpaa_get_qm_channel_pool;
dpaa_get_link_status;
dpaa_restart_link_autoneg;
+ dpaa_seqn_dynfield_offset;
dpaa_update_link_speed;
dpaa_intr_disable;
dpaa_intr_enable;
@@ -1721,8 +1721,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
DPAA_SEC_BURST : nb_ops;
for (loop = 0; loop < frames_to_send; loop++) {
op = *(ops++);
- if (op->sym->m_src->seqn != 0) {
- index = op->sym->m_src->seqn - 1;
+ if (*dpaa_seqn(op->sym->m_src) != 0) {
+ index = *dpaa_seqn(op->sym->m_src) - 1;
if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
/* QM_EQCR_DCA_IDXMASK = 0x0f */
flags[loop] = ((index & 0x0f) << 8);
@@ -3212,7 +3212,7 @@ dpaa_sec_process_atomic_event(void *event,
DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
ev->impl_opaque = index + 1;
- ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
+ *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
*bufs = (void *)ctx->op;
rte_mempool_put(ctx->ctx_pool, (void *)ctx);
@@ -99,7 +99,7 @@ dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
case RTE_EVENT_OP_RELEASE:
qman_dca_index(ev[i].impl_opaque, 0);
mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
- mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
DPAA_PER_LCORE_DQRR_SIZE--;
break;
@@ -206,7 +206,7 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
qman_dca_index(i, 0);
mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
- mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
DPAA_PER_LCORE_DQRR_SIZE--;
}
@@ -276,7 +276,7 @@ dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
qman_dca_index(i, 0);
mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
- mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
DPAA_PER_LCORE_DQRR_SIZE--;
}
@@ -22,13 +22,6 @@
#define DPAA_MBUF_HW_ANNOTATION 64
#define DPAA_FD_PTA_SIZE 64
-/* mbuf->seqn will be used to store event entry index for
- * driver specific usage. For parallel mode queues, invalid
- * index will be set and for atomic mode queues, valid value
- * ranging from 1 to 16.
- */
-#define DPAA_INVALID_MBUF_SEQN 0
-
/* we will re-use the HEADROOM for annotation in RX */
#define DPAA_HW_BUF_RESERVE 0
#define DPAA_PACKET_LAYOUT_ALIGN 64
@@ -649,7 +649,7 @@ dpaa_rx_cb_parallel(void *event,
ev->queue_id = fq->ev.queue_id;
ev->priority = fq->ev.priority;
ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
- mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
*bufs = mbuf;
return qman_cb_dqrr_consume;
@@ -683,7 +683,7 @@ dpaa_rx_cb_atomic(void *event,
DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
ev->impl_opaque = index + 1;
- mbuf->seqn = (uint32_t)index + 1;
+ *dpaa_seqn(mbuf) = (uint32_t)index + 1;
*bufs = mbuf;
return qman_cb_dqrr_defer;
@@ -1078,7 +1078,7 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
if (dpaa_svr_family == SVR_LS1043A_FAMILY &&
(mbuf->data_off & 0x7F) != 0x0)
realloc_mbuf = 1;
- seqn = mbuf->seqn;
+ seqn = *dpaa_seqn(mbuf);
if (seqn != DPAA_INVALID_MBUF_SEQN) {
index = seqn - 1;
if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {