new file mode 100644
@@ -0,0 +1,670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+#include "base/dlb_resource.h"
+#include "base/dlb_osdep.h"
+#include "base/dlb_regs.h"
+#include "dlb_main.h"
+#include "../dlb_user.h"
+
+
+#define PF_ID_ZERO 0 /* PF ONLY! */
+#define NO_OWNER_VF 0 /* PF ONLY! */
+#define NOT_VF_REQ false /* PF ONLY! */
+
+unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
+
+#define DLB_PCI_CFG_SPACE_SIZE 256
+#define DLB_PCI_CAP_POINTER 0x34
+#define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
+#define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
+#define DLB_PCI_EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xFFC)
+#define DLB_PCI_EXT_CAP_ID(hdr) ((hdr) & 0xFFFF)
+#define DLB_PCI_EXT_CAP_ID_ERR 1
+#define DLB_PCI_ERR_UNCOR_MASK 8
+#define DLB_PCI_ERR_UNC_UNSUP 0x00100000
+
+#define DLB_PCI_EXP_DEVCTL 8
+#define DLB_PCI_LNKCTL 16
+#define DLB_PCI_SLTCTL 24
+#define DLB_PCI_RTCTL 28
+#define DLB_PCI_EXP_DEVCTL2 40
+#define DLB_PCI_LNKCTL2 48
+#define DLB_PCI_SLTCTL2 56
+#define DLB_PCI_CMD 4
+#define DLB_PCI_X_CMD 2
+#define DLB_PCI_EXP_DEVSTA 10
+#define DLB_PCI_EXP_DEVSTA_TRPND 0x20
+#define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000
+#define DLB_PCI_PASID_CTRL 6
+#define DLB_PCI_PASID_CAP 4
+
+#define DLB_PCI_CAP_ID_EXP 0x10
+#define DLB_PCI_CAP_ID_MSIX 0x11
+#define DLB_PCI_EXT_CAP_ID_PAS 0x1B
+#define DLB_PCI_EXT_CAP_ID_PRI 0x13
+#define DLB_PCI_EXT_CAP_ID_ACS 0xD
+
+#define DLB_PCI_PASID_CAP_EXEC 0x2
+#define DLB_PCI_PASID_CAP_PRIV 0x4
+#define DLB_PCI_PASID_CTRL_ENABLE 0x1
+#define DLB_PCI_PRI_CTRL_ENABLE 0x1
+#define DLB_PCI_PRI_ALLOC_REQ 0xC
+#define DLB_PCI_PRI_CTRL 0x4
+#define DLB_PCI_MSIX_FLAGS 0x2
+#define DLB_PCI_MSIX_FLAGS_ENABLE 0x8000
+#define DLB_PCI_MSIX_FLAGS_MASKALL 0x4000
+#define DLB_PCI_ERR_ROOT_STATUS 0x30
+#define DLB_PCI_ERR_COR_STATUS 0x10
+#define DLB_PCI_ERR_UNCOR_STATUS 0x4
+#define DLB_PCI_COMMAND_INTX_DISABLE 0x400
+#define DLB_PCI_ACS_CAP 0x4
+#define DLB_PCI_ACS_CTRL 0x6
+#define DLB_PCI_ACS_SV 0x1
+#define DLB_PCI_ACS_RR 0x4
+#define DLB_PCI_ACS_CR 0x8
+#define DLB_PCI_ACS_UF 0x10
+#define DLB_PCI_ACS_EC 0x20
+
+static inline void
+dlb_movntdq(void *qe4, void *pp_addr)
+{
+ /* Move entire 64B cache line of QEs, 128 bits (16B) at a time. */
+ long long *_qe = (long long *)qe4;
+ __v2di src_data0 = (__v2di){_qe[0], _qe[1]};
+ __v2di src_data1 = (__v2di){_qe[2], _qe[3]};
+ __v2di src_data2 = (__v2di){_qe[4], _qe[5]};
+ __v2di src_data3 = (__v2di){_qe[6], _qe[7]};
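+ /* The write barrier between each 16B non-temporal store keeps the
+ * four QE stores ordered as observed by the device.
+ */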
+ __builtin_ia32_movntdq((__v2di *)pp_addr + 0, (__v2di)src_data0);
+ rte_wmb();
+ __builtin_ia32_movntdq((__v2di *)pp_addr + 1, (__v2di)src_data1);
+ rte_wmb();
+ __builtin_ia32_movntdq((__v2di *)pp_addr + 2, (__v2di)src_data2);
+ rte_wmb();
+ __builtin_ia32_movntdq((__v2di *)pp_addr + 3, (__v2di)src_data3);
+ rte_wmb();
+}
+
+static inline void
+dlb_movdir64b(void *qe4, void *pp_addr)
+{
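+ /* Hand-encoded movdir64b (%rdx), %rax: atomically copies one 64B
+ * cache line from qe4 to pp_addr. The raw byte encoding avoids
+ * requiring assembler support for the mnemonic.
+ */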
+ asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+ :
+ : "a" (pp_addr), "d" (qe4));
+}
+
+
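+/* Walk the PCIe extended capability list, which begins at config space
+ * offset 0x100, and return the offset of the capability with the given ID,
+ * or -1 if it is not present.
+ */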
+static int dlb_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)
+{
+ uint32_t hdr;
+ size_t sz;
+ int pos;
+
+ pos = DLB_PCI_CFG_SPACE_SIZE;
+ sz = sizeof(hdr);
+
+ while (pos > 0xFF) {
+ if (rte_pci_read_config(pdev, &hdr, sz, pos) != (int)sz)
+ return -1;
+
+ if (DLB_PCI_EXT_CAP_ID(hdr) == id)
+ return pos;
+
+ pos = DLB_PCI_EXT_CAP_NEXT(hdr);
+ }
+
+ return -1;
+}
+
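+/* Walk the standard PCI capability list, starting from the capability
+ * pointer at config space offset 0x34, and return the offset of the
+ * capability with the given ID, or -1 if it is not present.
+ */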
+static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
+{
+ uint8_t pos;
+ int ret;
+ uint16_t hdr;
+
+ ret = rte_pci_read_config(pdev, &pos, 1, DLB_PCI_CAP_POINTER);
+ if (ret != 1)
+ return -1;
+
+ pos &= 0xFC;
+
+ while (pos > 0x3F) {
+ ret = rte_pci_read_config(pdev, &hdr, 2, pos);
+ if (ret != 2)
+ return -1;
+
+ if (DLB_PCI_CAP_ID(hdr) == id)
+ return pos;
+
+ if (DLB_PCI_CAP_ID(hdr) == 0xFF)
+ return -1;
+
+ pos = DLB_PCI_CAP_NEXT(hdr);
+ }
+
+ return -1;
+}
+
+static int dlb_mask_ur_err(struct rte_pci_device *pdev)
+{
+ uint32_t mask;
+ size_t sz = sizeof(mask);
+ int pos = dlb_pci_find_ext_capability(pdev, DLB_PCI_EXT_CAP_ID_ERR);
+
+ if (pos < 0) {
+ printf("[%s()] failed to find the aer capability\n",
+ __func__);
+ return pos;
+ }
+
+ pos += DLB_PCI_ERR_UNCOR_MASK;
+
+ if (rte_pci_read_config(pdev, &mask, sz, pos) != (int)sz) {
+ printf("[%s()] Failed to read uncorrectable error mask reg\n",
+ __func__);
+ return -1;
+ }
+
+ /* Mask Unsupported Request errors */
+ mask |= DLB_PCI_ERR_UNC_UNSUP;
+
+ if (rte_pci_write_config(pdev, &mask, sz, pos) != (int)sz) {
+ printf("[%s()] Failed to write uncorrectable error mask reg at offset %d\n",
+ __func__, pos);
+ return -1;
+ }
+
+ return 0;
+}
+
+struct dlb_dev *
+dlb_probe(struct rte_pci_device *pdev)
+{
+ struct dlb_dev *dlb_dev;
+ int ret = 0;
+
+ dlb_dev = rte_malloc("DLB_PF", sizeof(struct dlb_dev),
+ RTE_CACHE_LINE_SIZE);
+
+ if (dlb_dev == NULL) {
+ ret = -ENOMEM;
+ goto dlb_dev_malloc_fail;
+ }
+
+ DLB_INFO(dlb_dev, "probe\n");
+
+ /* PCI Bus driver has already mapped bar space into process.
+ * Save off our IO register and FUNC addresses.
+ */
+
+ /* BAR 0 */
+ if (pdev->mem_resource[0].addr == NULL) {
+ DLB_ERR(dlb_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
+ ret = -EINVAL;
+ goto pci_mmap_bad_addr;
+ }
+ dlb_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
+ dlb_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
+
+ DLB_INFO(dlb_dev, "DLB FUNC VA=%p, PA=%p, len=%"PRIu64"\n",
+ (void *)dlb_dev->hw.func_kva,
+ (void *)dlb_dev->hw.func_phys_addr,
+ pdev->mem_resource[0].len);
+
+ /* BAR 2 */
+ if (pdev->mem_resource[2].addr == NULL) {
+ DLB_ERR(dlb_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
+ ret = -EINVAL;
+ goto pci_mmap_bad_addr;
+ }
+ dlb_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
+ dlb_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
+
+ DLB_INFO(dlb_dev, "DLB CSR VA=%p, PA=%p, len=%"PRIu64"\n",
+ (void *)dlb_dev->hw.csr_kva,
+ (void *)dlb_dev->hw.csr_phys_addr,
+ pdev->mem_resource[2].len);
+
+ dlb_dev->pdev = pdev;
+
+ ret = dlb_pf_reset(dlb_dev);
+ if (ret)
+ goto dlb_reset_fail;
+
+ /* DLB incorrectly sends URs in response to certain messages. Mask UR
+ * errors to prevent these from being propagated to the MCA.
+ */
+ ret = dlb_mask_ur_err(pdev);
+ if (ret)
+ goto mask_ur_err_fail;
+
+ ret = dlb_pf_init_interrupts(dlb_dev);
+ if (ret)
+ goto init_interrupts_fail;
+
+ ret = dlb_pf_init_driver_state(dlb_dev);
+ if (ret)
+ goto init_driver_state_fail;
+
+ ret = dlb_resource_init(&dlb_dev->hw);
+ if (ret)
+ goto resource_init_fail;
+
+ dlb_dev->revision = os_get_dev_revision(&dlb_dev->hw);
+
+ dlb_pf_init_hardware(dlb_dev);
+
+ return dlb_dev;
+
+resource_init_fail:
+ dlb_resource_free(&dlb_dev->hw);
+init_driver_state_fail:
+ dlb_pf_free_interrupts(dlb_dev);
+init_interrupts_fail:
+mask_ur_err_fail:
+dlb_reset_fail:
+pci_mmap_bad_addr:
+ rte_free(dlb_dev);
+dlb_dev_malloc_fail:
+ rte_errno = -ret;
+ return NULL;
+}
+
+int
+dlb_pf_reset(struct dlb_dev *dlb_dev)
+{
+ int ret = 0;
+ int i = 0;
+ uint32_t dword[16];
+ uint16_t cmd;
+ off_t off;
+
+ uint16_t dev_ctl_word;
+ uint16_t dev_ctl2_word;
+ uint16_t lnk_word;
+ uint16_t lnk_word2;
+ uint16_t slt_word;
+ uint16_t slt_word2;
+ uint16_t rt_ctl_word;
+ uint32_t pri_reqs_dword;
+ uint16_t pri_ctrl_word;
+
+ int pcie_cap_offset;
+ int pasid_cap_offset;
+ int pri_cap_offset;
+ int msix_cap_offset;
+ int err_cap_offset;
+ int acs_cap_offset;
+ int wait_count;
+
+ uint16_t devsta_busy_word;
+ uint16_t devctl_word;
+ uint16_t pasid_ctrl_word;
+ uint16_t pasid_features;
+
+ struct rte_pci_device *pdev = dlb_dev->pdev;
+
+ /* Save the first 64 bytes (16 dwords) of PCI config space; they are
+ * restored after the FLR completes.
+ */
+
+ for (i = 0; i < 16; i++) {
+ if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
+ return -1;
+ }
+
+ pcie_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_EXP);
+
+ if (pcie_cap_offset < 0) {
+ printf("[%s()] failed to find the pcie capability\n",
+ __func__);
+ return pcie_cap_offset;
+ }
+
+ off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+ if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
+ dev_ctl_word = 0;
+
+ off = pcie_cap_offset + DLB_PCI_LNKCTL;
+ if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
+ lnk_word = 0;
+
+ off = pcie_cap_offset + DLB_PCI_SLTCTL;
+ if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
+ slt_word = 0;
+
+ off = pcie_cap_offset + DLB_PCI_RTCTL;
+ if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
+ rt_ctl_word = 0;
+
+ off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
+ if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
+ dev_ctl2_word = 0;
+
+ off = pcie_cap_offset + DLB_PCI_LNKCTL2;
+ if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
+ lnk_word2 = 0;
+
+ off = pcie_cap_offset + DLB_PCI_SLTCTL2;
+ if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
+ slt_word2 = 0;
+
+ pri_cap_offset = dlb_pci_find_ext_capability(pdev,
+ DLB_PCI_EXT_CAP_ID_PRI);
+ if (pri_cap_offset >= 0) {
+ off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
+ if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
+ pri_reqs_dword = 0;
+ }
+
+ /* clear the PCI command register before issuing the FLR */
+
+ off = DLB_PCI_CMD;
+ cmd = 0;
+ if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ /* wait for pending transactions to complete before issuing the FLR */
+ for (wait_count = 0; wait_count < 4; wait_count++) {
+ int sleep_time;
+
+ off = pcie_cap_offset + DLB_PCI_EXP_DEVSTA;
+ ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
+ if (ret != 2) {
+ printf("[%s()] failed to read the pci device status\n",
+ __func__);
+ return -1;
+ }
+
+ if (!(devsta_busy_word & DLB_PCI_EXP_DEVSTA_TRPND))
+ break;
+
+ sleep_time = (1 << (wait_count)) * 100;
+ rte_delay_ms(sleep_time);
+ }
+
+ if (wait_count == 4) {
+ printf("[%s()] wait for pci pending transactions timed out\n",
+ __func__);
+ return -1;
+ }
+
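+ /* Issue the FLR by setting the initiate-FLR bit in the PCIe device
+ * control register.
+ */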
+ off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+ ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
+ if (ret != 2) {
+ printf("[%s()] failed to read the pcie device control\n",
+ __func__);
+ return -1;
+ }
+
+ devctl_word |= DLB_PCI_EXP_DEVCTL_BCR_FLR;
+
+ if (rte_pci_write_config(pdev, &devctl_word, 2, off) != 2) {
+ printf("[%s()] failed to write the pcie device control at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
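+ /* Per the PCIe spec, allow 100 ms for the function to complete the
+ * FLR before accessing its config space again.
+ */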
+ rte_delay_ms(100);
+
+ /* Restore PCI config state */
+
+ if (pcie_cap_offset >= 0) {
+ off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+ if (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {
+ printf("[%s()] failed to write the pcie device control at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = pcie_cap_offset + DLB_PCI_LNKCTL;
+ if (rte_pci_write_config(pdev, &lnk_word, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = pcie_cap_offset + DLB_PCI_SLTCTL;
+ if (rte_pci_write_config(pdev, &slt_word, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = pcie_cap_offset + DLB_PCI_RTCTL;
+ if (rte_pci_write_config(pdev, &rt_ctl_word, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
+ if (rte_pci_write_config(pdev, &dev_ctl2_word, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = pcie_cap_offset + DLB_PCI_LNKCTL2;
+ if (rte_pci_write_config(pdev, &lnk_word2, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = pcie_cap_offset + DLB_PCI_SLTCTL2;
+ if (rte_pci_write_config(pdev, &slt_word2, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+ }
+
+ pasid_cap_offset = dlb_pci_find_ext_capability(pdev,
+ DLB_PCI_EXT_CAP_ID_PAS);
+ if (pasid_cap_offset >= 0) {
+ off = pasid_cap_offset + DLB_PCI_PASID_CAP;
+ if (rte_pci_read_config(pdev, &pasid_features, 2, off) != 2)
+ pasid_features = 0;
+
+ /* Keep only the EXEC and PRIV feature bits when re-enabling PASID */
+ pasid_features &= (DLB_PCI_PASID_CAP_EXEC | DLB_PCI_PASID_CAP_PRIV);
+ pasid_ctrl_word = DLB_PCI_PASID_CTRL_ENABLE | pasid_features;
+
+ off = pasid_cap_offset + DLB_PCI_PASID_CTRL;
+ if (rte_pci_write_config(pdev, &pasid_ctrl_word, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+ }
+
+ if (pri_cap_offset >= 0) {
+ pri_ctrl_word = DLB_PCI_PRI_CTRL_ENABLE;
+
+ off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
+ if (rte_pci_write_config(pdev, &pri_reqs_dword, 4, off) != 4) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = pri_cap_offset + DLB_PCI_PRI_CTRL;
+ if (rte_pci_write_config(pdev, &pri_ctrl_word, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+ }
+
+ err_cap_offset = dlb_pci_find_ext_capability(pdev,
+ DLB_PCI_EXT_CAP_ID_ERR);
+ if (err_cap_offset >= 0) {
+ uint32_t tmp;
+
+ off = err_cap_offset + DLB_PCI_ERR_ROOT_STATUS;
+ if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+ tmp = 0;
+
+ if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = err_cap_offset + DLB_PCI_ERR_COR_STATUS;
+ if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+ tmp = 0;
+
+ if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = err_cap_offset + DLB_PCI_ERR_UNCOR_STATUS;
+ if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+ tmp = 0;
+
+ if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+ }
+
+ for (i = 16; i > 0; i--) {
+ off = (i - 1) * 4;
+ if (rte_pci_write_config(pdev, &dword[i - 1], 4, off) != 4) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+ }
+
+ off = DLB_PCI_CMD;
+ if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+ cmd &= ~DLB_PCI_COMMAND_INTX_DISABLE;
+ if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space\n",
+ __func__);
+ return -1;
+ }
+ }
+
+ msix_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);
+ if (msix_cap_offset >= 0) {
+ off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
+ if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+ cmd |= DLB_PCI_MSIX_FLAGS_ENABLE;
+ cmd |= DLB_PCI_MSIX_FLAGS_MASKALL;
+ if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+ printf("[%s()] failed to write msix flags\n",
+ __func__);
+ return -1;
+ }
+ }
+
+ off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
+ if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+ cmd &= ~DLB_PCI_MSIX_FLAGS_MASKALL;
+ if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+ printf("[%s()] failed to write msix flags\n",
+ __func__);
+ return -1;
+ }
+ }
+ }
+
+ acs_cap_offset = dlb_pci_find_ext_capability(pdev,
+ DLB_PCI_EXT_CAP_ID_ACS);
+ if (acs_cap_offset >= 0) {
+ uint16_t acs_cap, acs_ctrl, acs_mask;
+
+ off = acs_cap_offset + DLB_PCI_ACS_CAP;
+ if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
+ acs_cap = 0;
+
+ off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+ if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
+ acs_ctrl = 0;
+
+ acs_mask = DLB_PCI_ACS_SV | DLB_PCI_ACS_RR;
+ acs_mask |= (DLB_PCI_ACS_CR | DLB_PCI_ACS_UF);
+ acs_ctrl |= (acs_cap & acs_mask);
+
+ if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+
+ off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+ if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
+ acs_ctrl = 0;
+
+ acs_mask = DLB_PCI_ACS_RR | DLB_PCI_ACS_CR | DLB_PCI_ACS_EC;
+ acs_ctrl &= ~acs_mask;
+
+ off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+ if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
+ printf("[%s()] failed to write pci config space at offset %d\n",
+ __func__, (int)off);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*******************************/
+/****** Driver management ******/
+/*******************************/
+
+int
+dlb_pf_init_driver_state(struct dlb_dev *dlb_dev)
+{
+ int i;
+
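+ /* Use a single MOVDIR64B 64B store when the CPU supports it;
+ * otherwise fall back to four write-ordered non-temporal 16B stores.
+ */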
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MOVDIR64B))
+ dlb_dev->enqueue_four = dlb_movdir64b;
+ else
+ dlb_dev->enqueue_four = dlb_movntdq;
+
+ /* Initialize software state */
+ for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
+ dlb_list_init_head(&dlb_dev->ldb_port_pages[i].list);
+
+ for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
+ dlb_list_init_head(&dlb_dev->dir_port_pages[i].list);
+
+ rte_spinlock_init(&dlb_dev->resource_mutex);
+ rte_spinlock_init(&dlb_dev->measurement_lock);
+
+ return 0;
+}
+
+void
+dlb_pf_init_hardware(struct dlb_dev *dlb_dev)
+{
+ dlb_disable_dp_vasr_feature(&dlb_dev->hw);
+
+ dlb_enable_excess_tokens_alarm(&dlb_dev->hw);
+
+ if (dlb_dev->revision >= DLB_REV_B0) {
+ dlb_hw_enable_sparse_ldb_cq_mode(&dlb_dev->hw);
+ dlb_hw_enable_sparse_dir_cq_mode(&dlb_dev->hw);
+ }
+}
new file mode 100644
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_MAIN_H
+#define __DLB_MAIN_H
+
+#include <unistd.h>
+
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_spinlock.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#endif
+
+#include "base/dlb_hw_types.h"
+#include "../dlb_user.h"
+
+#define DLB_DEFAULT_UNREGISTER_TIMEOUT_S 5
+
+struct dlb_dev;
+
+struct dlb_port_page {
+ struct dlb_list_head list;
+ unsigned long offs;
+ struct iova *iova;
+ struct page *page;
+ uintptr_t dma_addr;
+ unsigned long dma_size;
+ int refcnt;
+};
+
+struct dlb_port_memory {
+ struct dlb_list_head list;
+ struct dlb_port_page *pages;
+ void *cq_base;
+ uintptr_t cq_dma_base;
+ void *pc_base;
+ uintptr_t pc_dma_base;
+ int domain_id;
+ bool valid;
+};
+
+struct dlb_dev {
+ struct rte_pci_device *pdev;
+ struct dlb_hw hw;
+ struct device *dlb_device;
+ struct dlb_port_memory ldb_port_pages[DLB_MAX_NUM_LDB_PORTS];
+ struct dlb_port_memory dir_port_pages[DLB_MAX_NUM_DIR_PORTS];
+ /* The enqueue_four function enqueues four HCWs (one cache-line worth)
+ * to the DLB, using whichever mechanism is supported by the platform
+ * on which this driver is running.
+ */
+ void (*enqueue_four)(void *qe4, void *pp_addr);
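+ /* e.g. dev->enqueue_four(qes, pp_addr), where qes points at four
+ * contiguous 16B HCWs and pp_addr is the port's producer port window.
+ */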
+ bool domain_reset_failed;
+ /* The resource mutex serializes access to driver data structures and
+ * hardware registers.
+ */
+ rte_spinlock_t resource_mutex;
+ rte_spinlock_t measurement_lock;
+ bool worker_launched;
+ u8 revision;
+};
+
+struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);
+void dlb_reset_done(struct dlb_dev *dlb_dev);
+
+/* pf_ops */
+int dlb_pf_init_driver_state(struct dlb_dev *dev);
+void dlb_pf_free_driver_state(struct dlb_dev *dev);
+int dlb_pf_init_interrupts(struct dlb_dev *dev);
+int dlb_hw_enable_ldb_cq_interrupts(struct dlb_dev *dev,
+ int port_id,
+ u16 thresh);
+int dlb_hw_enable_dir_cq_interrupts(struct dlb_dev *dev,
+ int port_id,
+ u16 thresh);
+int dlb_pf_arm_cq_interrupt(struct dlb_dev *dev,
+ int domain_id,
+ int port_id,
+ bool is_ldb);
+void dlb_pf_reinit_interrupts(struct dlb_dev *dev);
+void dlb_pf_free_interrupts(struct dlb_dev *dev);
+void dlb_pf_init_hardware(struct dlb_dev *dev);
+int dlb_pf_reset(struct dlb_dev *dlb_dev);
+
+#endif /* __DLB_MAIN_H */
new file mode 100644
@@ -0,0 +1,839 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/fcntl.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_dev.h>
+#include <rte_devargs.h>
+#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_pci.h>
+#include <rte_memory.h>
+#include <rte_string_fns.h>
+
+#include "../dlb_priv.h"
+#include "../dlb_iface.h"
+#include "../dlb_inline_fns.h"
+#include "dlb_main.h"
+#include "base/dlb_hw_types.h"
+#include "base/dlb_osdep.h"
+#include "base/dlb_resource.h"
+
+extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];
+
+static const char *event_dlb_pf_name = "dlb_event"; /* Same as vdev name */
+
+static void
+dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
+{
+ int i;
+
+ /* Addresses will be initialized at port create */
+ for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
+ /* First directed ports */
+
+ /* producer port */
+ dlb_port[i][DLB_DIR].pp_addr = NULL;
+
+ /* popcount */
+ dlb_port[i][DLB_DIR].ldb_popcount = NULL;
+ dlb_port[i][DLB_DIR].dir_popcount = NULL;
+
+ /* consumer queue */
+ dlb_port[i][DLB_DIR].cq_base = NULL;
+ dlb_port[i][DLB_DIR].mmaped = true;
+
+ /* Now load balanced ports */
+
+ /* producer port */
+ dlb_port[i][DLB_LDB].pp_addr = NULL;
+
+ /* popcount */
+ dlb_port[i][DLB_LDB].ldb_popcount = NULL;
+ dlb_port[i][DLB_LDB].dir_popcount = NULL;
+
+ /* consumer queue */
+ dlb_port[i][DLB_LDB].cq_base = NULL;
+ dlb_port[i][DLB_LDB].mmaped = true;
+ }
+}
+
+static int
+dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
+{
+ RTE_SET_USED(handle);
+ RTE_SET_USED(name);
+
+ return 0;
+}
+
+static void
+dlb_pf_domain_close(struct dlb_eventdev *dlb)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
+ int ret;
+
+ ret = dlb_reset_domain(&dlb_dev->hw,
+ dlb->qm_instance.domain_id,
+ false,
+ 0);
+ if (ret)
+ DLB_LOG_ERR("dlb_pf_reset_domain err %d", ret);
+}
+
+static int
+dlb_pf_get_device_version(struct dlb_hw_dev *handle,
+ uint8_t *revision)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+ *revision = dlb_dev->revision;
+
+ return 0;
+}
+
+#define PF_ID_ZERO 0 /* PF ONLY! */
+#define NO_OWNER_VF 0 /* PF ONLY! */
+#define NOT_VF_REQ false /* PF ONLY! */
+
+static int
+dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
+ struct dlb_get_num_resources_args *rsrcs)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+ return dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs, false, 0);
+}
+
+static int
+dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
+ struct dlb_create_sched_domain_args *arg)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ if (dlb_dev->domain_reset_failed) {
+ response.status = DLB_ST_DOMAIN_RESET_FAILED;
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response,
+ NOT_VF_REQ, PF_ID_ZERO);
+
+done:
+
+ *(struct dlb_cmd_response *)arg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
+ struct dlb_create_ldb_pool_args *cfg)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
+ struct dlb_create_dir_pool_args *cfg)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
+ struct dlb_create_ldb_queue_args *cfg)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
+ struct dlb_create_dir_queue_args *cfg)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static void *
+dlb_alloc_coherent_aligned(rte_iova_t *phys, size_t size, int align)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t core_id = rte_lcore_id();
+ unsigned int socket_id;
+
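+ /* Use the current timer-cycle count as a probabilistically unique
+ * memzone name.
+ */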
+ snprintf(mz_name, sizeof(mz_name), "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ if (core_id == (unsigned int)LCORE_ID_ANY)
+ core_id = rte_get_master_lcore();
+ socket_id = rte_lcore_to_socket_id(core_id);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
+ if (mz == NULL) {
+ DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes - %s\n",
+ size, rte_strerror(rte_errno));
+ *phys = 0;
+ return NULL;
+ }
+ *phys = mz->iova;
+ return mz->addr;
+}
+
+static int
+dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
+ struct dlb_create_ldb_port_args *cfg,
+ enum dlb_cq_poll_modes poll_mode)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ struct dlb_port_memory port_memory;
+ int ret;
+ uint8_t *port_base;
+ int alloc_sz, qe_sz;
+ rte_iova_t pp_dma_base;
+ rte_iova_t pc_dma_base;
+ rte_iova_t cq_dma_base;
+ int is_dir = false;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ if (poll_mode == DLB_CQ_POLL_MODE_STD)
+ qe_sz = sizeof(struct dlb_dequeue_qe);
+ else
+ qe_sz = RTE_CACHE_LINE_SIZE;
+
+ /* Calculate the port memory required, including two cache lines for
+ * credit pop counts. Round up to the nearest cache line.
+ */
+ alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
+ alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
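+ /* Illustrative example (values assumed): cq_depth = 128 with 16B QEs
+ * gives 2 * 64 + 128 * 16 = 2176 bytes, already a 64B multiple.
+ */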
+
+ port_base = dlb_alloc_coherent_aligned(&pc_dma_base,
+ alloc_sz,
+ PAGE_SIZE);
+ if (port_base == NULL)
+ return -ENOMEM;
+
+ /* Lock the page in memory */
+ ret = rte_mem_lock_page(port_base);
+ if (ret < 0)
+ rte_panic("dlb pf pmd could not lock page for device i/o\n");
+
+ memset(port_base, 0, alloc_sz);
+ cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
+
+ ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ pc_dma_base,
+ cq_dma_base,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+ if (ret)
+ goto create_port_err;
+
+ pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
+ dlb_port[response.id][DLB_LDB].pp_addr =
+ (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+
+ dlb_port[response.id][DLB_LDB].cq_base =
+ (void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
+
+ dlb_port[response.id][DLB_LDB].ldb_popcount =
+ (void *)(uintptr_t)port_base;
+ dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
+ (port_base + RTE_CACHE_LINE_SIZE);
+
+ memset(&port_memory, 0, sizeof(port_memory));
+ dlb_list_init_head(&port_memory.list);
+
+ /* Fill out the per-port memory tracking structure */
+ dlb_dev->ldb_port_pages[response.id].pages = port_memory.pages;
+ dlb_dev->ldb_port_pages[response.id].domain_id = handle->domain_id;
+ dlb_dev->ldb_port_pages[response.id].cq_base =
+ (void *)(uintptr_t)cq_dma_base;
+ dlb_dev->ldb_port_pages[response.id].pc_base =
+ (void *)(uintptr_t)pc_dma_base;
+ dlb_dev->ldb_port_pages[response.id].cq_dma_base = cq_dma_base;
+ dlb_dev->ldb_port_pages[response.id].pc_dma_base = pc_dma_base;
+ dlb_dev->ldb_port_pages[response.id].valid = true;
+ dlb_list_splice(&port_memory.list,
+ &dlb_dev->ldb_port_pages[response.id].list);
+
+ ret = dlb_hw_enable_ldb_cq_interrupts(dlb_dev,
+ response.id,
+ cfg->cq_depth_threshold);
+ if (ret) /* Internal error, don't unwind port creation */
+ goto create_port_err;
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+create_port_err:
+
+ return ret;
+}
+
+static int
+dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
+ struct dlb_create_dir_port_args *cfg,
+ enum dlb_cq_poll_modes poll_mode)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ struct dlb_port_memory port_memory;
+ int ret;
+ uint8_t *port_base;
+ int alloc_sz, qe_sz;
+ rte_iova_t pp_dma_base;
+ rte_iova_t pc_dma_base;
+ rte_iova_t cq_dma_base;
+ int is_dir = true;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ if (poll_mode == DLB_CQ_POLL_MODE_STD)
+ qe_sz = sizeof(struct dlb_dequeue_qe);
+ else
+ qe_sz = RTE_CACHE_LINE_SIZE;
+
+ /* Calculate the port memory required, including two cache lines for
+ * credit pop counts. Round up to the nearest cache line.
+ */
+ alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
+ alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
+
+ port_base = dlb_alloc_coherent_aligned(&pc_dma_base,
+ alloc_sz,
+ PAGE_SIZE);
+ if (port_base == NULL)
+ return -ENOMEM;
+
+ /* Lock the page in memory */
+ ret = rte_mem_lock_page(port_base);
+ if (ret < 0)
+ rte_panic("dlb pf pmd could not lock page for device i/o\n");
+
+ memset(port_base, 0, alloc_sz);
+ cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
+
+ ret = dlb_hw_create_dir_port(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ pc_dma_base,
+ cq_dma_base,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+ if (ret)
+ goto create_port_err;
+
+ pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
+ dlb_port[response.id][DLB_DIR].pp_addr =
+ (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+
+ dlb_port[response.id][DLB_DIR].cq_base =
+ (void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
+
+ dlb_port[response.id][DLB_DIR].ldb_popcount =
+ (void *)(uintptr_t)port_base;
+ dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
+ (port_base + RTE_CACHE_LINE_SIZE);
+
+ memset(&port_memory, 0, sizeof(port_memory));
+ dlb_list_init_head(&port_memory.list);
+
+ /* Fill out the per-port memory tracking structure */
+ dlb_dev->dir_port_pages[response.id].pages = port_memory.pages;
+ dlb_dev->dir_port_pages[response.id].domain_id = handle->domain_id;
+ dlb_dev->dir_port_pages[response.id].cq_base =
+ (void *)(uintptr_t)cq_dma_base;
+ dlb_dev->dir_port_pages[response.id].pc_base =
+ (void *)(uintptr_t)pc_dma_base;
+ dlb_dev->dir_port_pages[response.id].cq_dma_base = cq_dma_base;
+ dlb_dev->dir_port_pages[response.id].pc_dma_base = pc_dma_base;
+ dlb_dev->dir_port_pages[response.id].valid = true;
+ dlb_list_splice(&port_memory.list,
+ &dlb_dev->dir_port_pages[response.id].list);
+
+ ret = dlb_hw_enable_dir_cq_interrupts(dlb_dev,
+ response.id,
+ cfg->cq_depth_threshold);
+ if (ret) /* Internal error, don't unwind port creation */
+ goto create_port_err;
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+create_port_err:
+
+ return ret;
+}
+
+static int
+dlb_pf_map_qid(struct dlb_hw_dev *handle,
+ struct dlb_map_qid_args *cfg)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_map_qid(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_unmap_qid(struct dlb_hw_dev *handle,
+ struct dlb_unmap_qid_args *cfg)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_unmap_qid(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,
+ struct dlb_start_domain_args *cfg)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_start_domain(&dlb_dev->hw,
+ handle->domain_id,
+ cfg,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)cfg->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_block_on_cq_interrupt(struct dlb_hw_dev *handle,
+ int port_id, bool is_ldb,
+ volatile void *cq_va, uint8_t cq_gen,
+ bool arm)
+{
+ RTE_SET_USED(handle);
+ RTE_SET_USED(port_id);
+ RTE_SET_USED(is_ldb);
+ RTE_SET_USED(cq_va);
+ RTE_SET_USED(cq_gen);
+ RTE_SET_USED(arm);
+
+ return 0;
+}
+
+static int
+dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
+ struct dlb_pending_port_unmaps_args *args)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_pending_port_unmaps(&dlb_dev->hw,
+ handle->domain_id,
+ args,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)args->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_get_ldb_queue_depth(struct dlb_hw_dev *handle,
+ struct dlb_get_ldb_queue_depth_args *args)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_get_ldb_queue_depth(&dlb_dev->hw,
+ handle->domain_id,
+ args,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)args->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_get_dir_queue_depth(struct dlb_hw_dev *handle,
+ struct dlb_get_dir_queue_depth_args *args)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret = 0;
+
+ DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+ ret = dlb_hw_get_dir_queue_depth(&dlb_dev->hw,
+ handle->domain_id,
+ args,
+ &response,
+ NOT_VF_REQ,
+ PF_ID_ZERO);
+
+ *(struct dlb_cmd_response *)args->response = response;
+
+ DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int
+dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
+ enum dlb_cq_poll_modes *mode)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+ if (dlb_dev->revision >= DLB_REV_B0)
+ *mode = DLB_CQ_POLL_MODE_SPARSE;
+ else
+ *mode = DLB_CQ_POLL_MODE_STD;
+
+ return 0;
+}
+
+static int
+dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
+ struct dlb_get_sn_allocation_args *args)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);
+
+ response.id = ret;
+ response.status = 0;
+
+ *(struct dlb_cmd_response *)args->response = response;
+
+ return ret;
+}
+
+static int
+dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
+ struct dlb_set_sn_allocation_args *args)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
+ args->num);
+
+ response.status = 0;
+
+ *(struct dlb_cmd_response *)args->response = response;
+
+ return ret;
+}
+
+static int
+dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
+ struct dlb_get_sn_occupancy_args *args)
+{
+ struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+ struct dlb_cmd_response response = {0};
+ int ret;
+
+ ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
+ args->group);
+
+ response.id = ret;
+ response.status = 0;
+
+ *(struct dlb_cmd_response *)args->response = response;
+
+ return ret;
+}
+
+static void
+dlb_pf_iface_fn_ptrs_init(void)
+{
+ dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
+ dlb_iface_open = dlb_pf_open;
+ dlb_iface_domain_close = dlb_pf_domain_close;
+ dlb_iface_get_driver_version = NULL; /*dlb_pf_get_driver_version;*/
+ dlb_iface_get_device_version = dlb_pf_get_device_version;
+ dlb_iface_get_num_resources = dlb_pf_get_num_resources;
+ dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
+ dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
+ dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
+ dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
+ dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
+ dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
+ dlb_iface_dir_port_create = dlb_pf_dir_port_create;
+ dlb_iface_map_qid = dlb_pf_map_qid;
+ dlb_iface_unmap_qid = dlb_pf_unmap_qid;
+ dlb_iface_sched_domain_start = dlb_pf_sched_domain_start;
+ dlb_iface_block_on_cq_interrupt = dlb_pf_block_on_cq_interrupt;
+ dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
+ dlb_iface_get_ldb_queue_depth = dlb_pf_get_ldb_queue_depth;
+ dlb_iface_get_dir_queue_depth = dlb_pf_get_dir_queue_depth;
+ dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
+ dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
+ dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
+ dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
+}
+
+/* PCI DEV HOOKS */
+static int
+dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
+{
+ int ret = 0;
+ struct rte_pci_device *pci_dev;
+ struct dlb_devargs dlb_args = {
+ .socket_id = rte_socket_id(),
+ .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
+ .num_dir_credits_override = -1,
+ .defer_sched = 0,
+ .num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
+ };
+ struct dlb_eventdev *dlb;
+
+ DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
+ eventdev->data->dev_id, eventdev->data->socket_id);
+
+ dlb_entry_points_init(eventdev);
+
+ dlb_pf_iface_fn_ptrs_init();
+
+ pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
+
+ /* Probe the DLB PF layer */
+ dlb->qm_instance.pf_dev = dlb_probe(pci_dev);
+
+ if (dlb->qm_instance.pf_dev == NULL) {
+ DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
+ rte_errno);
+ ret = -rte_errno;
+ goto dlb_probe_failed;
+ }
+
+ /* Were we invoked with runtime parameters? */
+ if (pci_dev->device.devargs) {
+ ret = dlb_parse_params(pci_dev->device.devargs->args,
+ pci_dev->device.devargs->name,
+ &dlb_args);
+ if (ret) {
+ DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
+ ret, rte_errno);
+ goto dlb_probe_failed;
+ }
+ }
+
+ ret = dlb_primary_eventdev_probe(eventdev,
+ event_dlb_pf_name,
+ &dlb_args,
+ DLB_NOT_VDEV);
+ } else {
+ ret = dlb_secondary_eventdev_probe(eventdev,
+ event_dlb_pf_name,
+ DLB_NOT_VDEV);
+ }
+ if (ret)
+ goto dlb_probe_failed;
+
+ DLB_LOG_INFO("DLB PF Probe success\n");
+
+ return 0;
+
+dlb_probe_failed:
+
+ DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);
+
+ return ret;
+}
+
+#define EVENTDEV_INTEL_VENDOR_ID 0x8086
+
+static const struct rte_pci_id pci_id_dlb_map[] = {
+ {
+ RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
+ DLB_PF_DEV_ID)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static int
+event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
+ sizeof(struct dlb_eventdev), dlb_eventdev_pci_init,
+ event_dlb_pf_name);
+}
+
+static int
+event_dlb_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver pci_eventdev_dlb_pmd = {
+ .id_table = pci_id_dlb_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = event_dlb_pci_probe,
+ .remove = event_dlb_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);