@@ -109,6 +109,22 @@ static const efx_ev_ops_t __efx_ev_ef10_ops = {
};
#endif /* EFX_OPTS_EF10() */
+#if EFSYS_OPT_RIVERHEAD
+static const efx_ev_ops_t __efx_ev_rhead_ops = {
+ rhead_ev_init, /* eevo_init */
+ rhead_ev_fini, /* eevo_fini */
+ rhead_ev_qcreate, /* eevo_qcreate */
+ rhead_ev_qdestroy, /* eevo_qdestroy */
+ rhead_ev_qprime, /* eevo_qprime */
+ rhead_ev_qpost, /* eevo_qpost */
+ rhead_ev_qpoll, /* eevo_qpoll */
+ rhead_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ rhead_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_RIVERHEAD */
+
__checkReturn efx_rc_t
efx_ev_init(
@@ -150,6 +166,12 @@ efx_ev_init(
break;
#endif /* EFSYS_OPT_MEDFORD2 */
+#if EFSYS_OPT_RIVERHEAD
+ case EFX_FAMILY_RIVERHEAD:
+ eevop = &__efx_ev_rhead_ops;
+ break;
+#endif /* EFSYS_OPT_RIVERHEAD */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -1408,7 +1408,7 @@ efx_mcdi_get_workarounds(
__out_opt uint32_t *implementedp,
__out_opt uint32_t *enabledp);
-#if EFX_OPTS_EF10()
+#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
LIBEFX_INTERNAL
extern __checkReturn efx_rc_t
@@ -1428,7 +1428,7 @@ efx_mcdi_fini_evq(
__in efx_nic_t *enp,
__in uint32_t instance);
-#endif /* EFX_OPTS_EF10() */
+#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
#endif /* EFSYS_OPT_MCDI */
@@ -2443,7 +2443,20 @@ efx_mcdi_phy_module_get_info(
return (rc);
}
+#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
+
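+/*
+ * Size the MCDI INIT_EVQ request for the largest number of EVQ DMA buffers
+ * the command can carry; the checks below ensure this covers the per-family
+ * limits.
+ */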
+#define INIT_EVQ_MAXNBUFS MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM
+
#if EFX_OPTS_EF10()
+# if (INIT_EVQ_MAXNBUFS < EF10_EVQ_MAXNBUFS)
+# error "INIT_EVQ_MAXNBUFS too small"
+# endif
+#endif /* EFX_OPTS_EF10 */
+#if EFSYS_OPT_RIVERHEAD
+# if (INIT_EVQ_MAXNBUFS < RHEAD_EVQ_MAXNBUFS)
+# error "INIT_EVQ_MAXNBUFS too small"
+# endif
+#endif /* EFSYS_OPT_RIVERHEAD */
__checkReturn efx_rc_t
efx_mcdi_init_evq(
@@ -2459,7 +2472,7 @@ efx_mcdi_init_evq(
const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
efx_mcdi_req_t req;
EFX_MCDI_DECLARE_BUF(payload,
- MC_CMD_INIT_EVQ_V2_IN_LEN(EF10_EVQ_MAXNBUFS),
+ MC_CMD_INIT_EVQ_V2_IN_LEN(INIT_EVQ_MAXNBUFS),
MC_CMD_INIT_EVQ_V2_OUT_LEN);
boolean_t interrupting;
int ev_cut_through;
@@ -2472,7 +2485,7 @@ efx_mcdi_init_evq(
efx_rc_t rc;
npages = efx_evq_nbufs(enp, nevs);
- if (npages > EF10_EVQ_MAXNBUFS) {
+ if (npages > INIT_EVQ_MAXNBUFS) {
rc = EINVAL;
goto fail1;
}
@@ -2667,6 +2680,6 @@ efx_mcdi_fini_evq(
return (rc);
}
-#endif /* EFX_OPTS_EF10() */
+#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
#endif /* EFSYS_OPT_MCDI */
@@ -52,6 +52,7 @@ sources = [
'hunt_nic.c',
'medford_nic.c',
'medford2_nic.c',
+ 'rhead_ev.c',
'rhead_intr.c',
'rhead_nic.c',
]
new file mode 100644
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2018-2019 Solarflare Communications Inc.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_RIVERHEAD
+
+/*
+ * A non-interrupting event queue requires an interrupting event queue to
+ * refer to for wake-up events, even if wake-ups are never used.
+ * The referenced event queue may even be unallocated.
+ */
+#define EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
+
+
+ __checkReturn efx_rc_t
+rhead_ev_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return (0);
+}
+
+ void
+rhead_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+rhead_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ uint32_t irq;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
+
+ /* Set up the handler table */
+ eep->ee_rx = NULL; /* FIXME */
+ eep->ee_tx = NULL; /* FIXME */
+ eep->ee_driver = NULL; /* FIXME */
+ eep->ee_drv_gen = NULL; /* FIXME */
+ eep->ee_mcdi = NULL; /* FIXME */
+
+ /* Set up the event queue */
+ /* INIT_EVQ expects function-relative vector number */
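+ /*
+ * Queues that request interrupts use their own index as the IRQ;
+ * the always-interrupting queue keeps interrupts enabled even when
+ * not requested, and all other queues refer to it for wake-ups.
+ */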
+ if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
+ irq = index;
+ } else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
+ irq = index;
+ flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ } else {
+ irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
+ }
+
+ /*
+ * Interrupts may be raised for events immediately after the queue is
+ * created. See bug58606.
+ */
+ rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
+ B_FALSE);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+rhead_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);
+
+ (void) efx_mcdi_fini_evq(enp, eep->ee_index);
+}
+
+ __checkReturn efx_rc_t
+rhead_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
+ ERF_GZ_IDX, rptr);
+ /* EVQ_INT_PRIME lives in the function control window only on Riverhead */
+ EFX_BAR_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword, B_FALSE);
+
+ return (0);
+}
+
+ void
+rhead_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ _NOTE(ARGUNUSED(eep, data))
+
+ /* Not implemented yet */
+ EFSYS_ASSERT(B_FALSE);
+}
+
+/*
+ * Poll the event queue in batches. The batch size is the cache line size
+ * divided by the event size.
+ *
+ * The event queue is written by the NIC and read by the CPU. Once the CPU
+ * starts reading events from a cache line, it reads all remaining events
+ * on that line in a tight loop while events are present.
+ */
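+/* Assuming a 64-byte cache line and 8-byte (quad-word) events: 64 / 8 = 8. */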
+#define EF100_EV_BATCH 8
+
+/*
+ * Check if event is present.
+ *
+ * Riverhead EvQs use a phase bit to indicate the presence of valid events,
+ * by flipping the phase bit on each wrap of the write index.
+ */
+#define EF100_EV_PRESENT(_qword, _phase_bit) \
+ (EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)
+
+ void
+rhead_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_qword_t ev[EF100_EV_BATCH];
+ unsigned int batch;
+ unsigned int phase_bit;
+ unsigned int total;
+ unsigned int count;
+ unsigned int index;
+ size_t offset;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+ EFSYS_ASSERT(countp != NULL);
+ EFSYS_ASSERT(eecp != NULL);
+
+ count = *countp;
+ do {
+ /* Read up until the end of the batch period */
+ batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
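+ /*
+ * ee_mask + 1 is the ring size; the corresponding bit of the
+ * running count gives the expected phase of valid events on
+ * this wrap.
+ */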
+ phase_bit = (count & (eep->ee_mask + 1)) != 0;
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (total = 0; total < batch; ++total) {
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
+
+ if (!EF100_EV_PRESENT(ev[total], phase_bit))
+ break;
+
+ EFSYS_PROBE3(event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
+
+ offset += sizeof (efx_qword_t);
+ }
+
+ /* Process the batch of events */
+ for (index = 0; index < total; ++index) {
+ boolean_t should_abort;
+ uint32_t code;
+
+ EFX_EV_QSTAT_INCR(eep, EV_ALL);
+
+ code = EFX_QWORD_FIELD(ev[index], ESF_GZ_E_TYPE);
+ switch (code) {
+ default:
+ EFSYS_PROBE3(bad_event,
+ unsigned int, eep->ee_index,
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ (void) eecp->eec_exception(arg,
+ EFX_EXCEPTION_EV_ERROR, code);
+ should_abort = B_TRUE;
+ }
+ if (should_abort) {
+ /* Ignore subsequent events */
+ total = index + 1;
+
+ /*
+ * Poison batch to ensure the outer
+ * loop is broken out of.
+ */
+ EFSYS_ASSERT(batch <= EF100_EV_BATCH);
+ batch += (EF100_EV_BATCH << 1);
+ EFSYS_ASSERT(total != batch);
+ break;
+ }
+ }
+
+ /*
+ * There is no need to clear processed events since the phase bit,
+ * which flips on each write index wrap, indicates event presence.
+ */
+
+ count += total;
+
+ } while (total == batch);
+
+ *countp = count;
+}
+
+ __checkReturn efx_rc_t
+rhead_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ _NOTE(ARGUNUSED(eep, us))
+
+ return (ENOTSUP);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+rhead_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+#endif /* EFSYS_OPT_RIVERHEAD */
@@ -12,6 +12,13 @@ extern "C" {
#endif
+/*
+ * Riverhead requires physically contiguous event rings (so a single
+ * DMA address is sufficient to represent one), but the MCDI interface
+ * still describes them in terms of 4 KiB-sized, 4 KiB-aligned DMA buffers.
+ */
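+/* The maximum ring of 16384 8-byte events fits in 32 such buffers. */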
+#define RHEAD_EVQ_MAXNBUFS 32
+
#define RHEAD_EVQ_MAXNEVS 16384
#define RHEAD_EVQ_MINNEVS 256
@@ -98,6 +105,72 @@ rhead_nic_unprobe(
__in efx_nic_t *enp);
+/* EV */
+
+LIBEFX_INTERNAL
+extern __checkReturn efx_rc_t
+rhead_ev_init(
+ __in efx_nic_t *enp);
+
+LIBEFX_INTERNAL
+extern void
+rhead_ev_fini(
+ __in efx_nic_t *enp);
+
+LIBEFX_INTERNAL
+extern __checkReturn efx_rc_t
+rhead_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+LIBEFX_INTERNAL
+extern void
+rhead_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+LIBEFX_INTERNAL
+extern __checkReturn efx_rc_t
+rhead_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+LIBEFX_INTERNAL
+extern void
+rhead_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+LIBEFX_INTERNAL
+extern void
+rhead_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+LIBEFX_INTERNAL
+extern __checkReturn efx_rc_t
+rhead_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+
+LIBEFX_INTERNAL
+extern void
+rhead_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+
/* INTR */
LIBEFX_INTERNAL