[dpdk-dev,v2,21/46] net/liquidio: initialize Rx queue
Checks
Commit Message
Initialize Rx queue registers and allocate packet buffers for Rx queue.
Signed-off-by: Shijith Thotton <shijith.thotton@caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Venkat Koppula <venkat.koppula@caviumnetworks.com>
Signed-off-by: Srisivasubramanian S <ssrinivasan@caviumnetworks.com>
Signed-off-by: Mallesham Jatharakonda <mjatharakonda@oneconvergence.com>
---
drivers/net/liquidio/base/lio_23xx_vf.c | 22 ++++++++
drivers/net/liquidio/base/lio_hw_defs.h | 2 +
drivers/net/liquidio/lio_rxtx.c | 96 +++++++++++++++++++++++++++++++++
drivers/net/liquidio/lio_rxtx.h | 20 +++++++
drivers/net/liquidio/lio_struct.h | 1 +
5 files changed, 141 insertions(+)
@@ -233,6 +233,27 @@
}
static void
+cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no)
+{
+ struct lio_droq *droq = lio_dev->droq[oq_no];
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Program the Rx (output) queue CSRs: descriptor ring DMA base
+ * address and ring size for queue oq_no.
+ */
+ lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ /* Low 16 bits carry the Rx buffer size; the receive-header size is
+ * packed into the upper bits (OCTEON_RH_SIZE << 16).
+ */
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCTEON_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+}
+
+static void
cn23xx_vf_free_mbox(struct lio_device *lio_dev)
{
PMD_INIT_FUNC_TRACE();
@@ -436,6 +457,7 @@
return -1;
lio_dev->fn_list.setup_iq_regs = cn23xx_vf_setup_iq_regs;
+ lio_dev->fn_list.setup_oq_regs = cn23xx_vf_setup_oq_regs;
lio_dev->fn_list.setup_mbox = cn23xx_vf_setup_mbox;
lio_dev->fn_list.free_mbox = cn23xx_vf_free_mbox;
@@ -116,6 +116,8 @@ enum octeon_tag_type {
/* This subcode is sent by core PCI driver to indicate cores are ready. */
#define LIO_OPCODE_IF_CFG 0x09
+#define LIO_MAX_RX_PKTLEN (64 * 1024)
+
/* Interface flags communicated between host driver and core app. */
enum lio_ifflags {
LIO_IFFLAG_UNICAST = 0x10
@@ -41,6 +41,96 @@
#include "lio_rxtx.h"
static void
+lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
+{
+ uint32_t count = 0;
+
+ /* Walk up in buffer_size steps until LIO_MAX_RX_PKTLEN is covered.
+ * NOTE(review): the computed count is never stored or returned, so
+ * this loop currently has no observable effect — confirm whether the
+ * result was intended to be saved into a droq field.
+ */
+ do {
+ count += droq->buffer_size;
+ } while (count < LIO_MAX_RX_PKTLEN);
+}
+
+/* Reset all ring cursors and the pending-packet counter so the droq
+ * starts (or restarts) from a clean, empty state.
+ */
+static void
+lio_droq_reset_indices(struct lio_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ rte_atomic64_set(&droq->pkts_pending, 0);
+}
+
+/* Free every mbuf still attached to the droq ring and clear its slot,
+ * then reset the ring indices. Safe on a partially filled ring: slots
+ * with a NULL buffer are skipped.
+ */
+static void
+lio_droq_destroy_ring_buffers(struct lio_droq *droq)
+{
+ uint32_t i;
+
+ for (i = 0; i < droq->max_count; i++) {
+ if (droq->recv_buf_list[i].buffer) {
+ rte_pktmbuf_free((struct rte_mbuf *)
+ droq->recv_buf_list[i].buffer);
+ droq->recv_buf_list[i].buffer = NULL;
+ }
+ }
+
+ lio_droq_reset_indices(droq);
+}
+
+/* Allocate one Rx packet buffer (mbuf) from the droq's mempool.
+ * Returns the mbuf as an opaque pointer, or NULL on allocation failure.
+ */
+static void *
+lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
+{
+ struct lio_droq *droq = lio_dev->droq[q_no];
+ struct rte_mempool *mpool = droq->mpool;
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(mpool);
+ if (m == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate\n");
+ return NULL;
+ }
+
+ /* NOTE(review): rte_pktmbuf_alloc() already resets these fields
+ * (refcnt, next, data_off, nb_segs, pool) — these explicit stores
+ * look redundant; confirm against the rte_mbuf API in use.
+ */
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ m->nb_segs = 1;
+ m->pool = mpool;
+
+ return m;
+}
+
+/* Populate the droq descriptor ring: allocate an mbuf for every slot,
+ * record it in recv_buf_list, zero the per-slot info length, and write
+ * the DMA addresses of the info entry and the packet buffer into the
+ * hardware-visible descriptor. On any allocation failure all buffers
+ * allocated so far are released and -ENOMEM is returned; returns 0 on
+ * success.
+ */
+static int
+lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
+ struct lio_droq *droq)
+{
+ struct lio_droq_desc *desc_ring = droq->desc_ring;
+ uint32_t i;
+ void *buf;
+
+ for (i = 0; i < droq->max_count; i++) {
+ buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ if (buf == NULL) {
+ lio_dev_err(lio_dev, "buffer alloc failed\n");
+ /* Unwind: frees every buffer placed so far. */
+ lio_droq_destroy_ring_buffers(droq);
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[i].buffer = buf;
+ droq->info_list[i].length = 0;
+
+ /* map ring buffers into memory */
+ desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[i].buffer);
+ }
+
+ lio_droq_reset_indices(droq);
+
+ lio_droq_compute_max_packet_bufs(droq);
+
+ return 0;
+}
+
+static void
lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
{
const struct rte_memzone *mz_tmp;
@@ -75,6 +165,7 @@
lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
+ lio_droq_destroy_ring_buffers(droq);
rte_free(droq->recv_buf_list);
droq->recv_buf_list = NULL;
lio_dma_zone_free(lio_dev, droq->info_mz);
@@ -172,10 +263,15 @@
goto init_droq_fail;
}
+ if (lio_droq_setup_ring_buffers(lio_dev, droq))
+ goto init_droq_fail;
+
droq->refill_threshold = c_refill_threshold;
rte_spinlock_init(&droq->lock);
+ lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);
+
lio_dev->io_qmask.oq |= (1ULL << q_no);
return 0;
@@ -495,6 +495,26 @@ enum {
}
}
+/* Return the physical (DMA) address of the packet-data area of the
+ * given mbuf, for writing into a hardware descriptor.
+ */
+static inline uint64_t
+lio_map_ring(void *buf)
+{
+ phys_addr_t dma_addr;
+
+ dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
+
+ return (uint64_t)dma_addr;
+}
+
+/* Return the physical (DMA) address of the i-th entry of the droq's
+ * info list, computed as base DMA address plus a fixed-size stride.
+ */
+static inline uint64_t
+lio_map_ring_info(struct lio_droq *droq, uint32_t i)
+{
+ phys_addr_t dma_addr;
+
+ dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);
+
+ return (uint64_t)dma_addr;
+}
+
/* Macro to increment index.
* Index is incremented by count; if the sum exceeds
* max, index is wrapped-around to the start.
@@ -306,6 +306,7 @@ struct lio_io_enable {
struct lio_fn_list {
void (*setup_iq_regs)(struct lio_device *, uint32_t);
+ void (*setup_oq_regs)(struct lio_device *, uint32_t);
int (*setup_mbox)(struct lio_device *);
void (*free_mbox)(struct lio_device *);