[v2] event: Remove Support For DLB V1

Message ID 20210316210812.15614-1-timothy.mcdaniel@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Series [v2] event: Remove Support For DLB V1

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/iol-abi-testing success Testing PASS
ci/Intel-compilation fail Compilation issues
ci/iol-testing success Testing PASS
ci/intel-Testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS

Commit Message

Timothy McDaniel March 16, 2021, 9:08 p.m. UTC
  Removed source code, rte_config defines, and documentation.
Removed from build.
Updated release notes to announce removal.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 config/rte_config.h                          |    6 -
 doc/api/doxy-api-index.md                    |    1 -
 doc/api/doxy-api.conf.in                     |    1 -
 doc/guides/eventdevs/dlb.rst                 |  341 -
 doc/guides/eventdevs/index.rst               |    1 -
 doc/guides/rel_notes/release_21_05.rst       |    3 +
 drivers/event/dlb/dlb.c                      | 4082 -----------
 drivers/event/dlb/dlb_iface.c                |   79 -
 drivers/event/dlb/dlb_iface.h                |   82 -
 drivers/event/dlb/dlb_inline_fns.h           |   36 -
 drivers/event/dlb/dlb_log.h                  |   25 -
 drivers/event/dlb/dlb_priv.h                 |  513 --
 drivers/event/dlb/dlb_selftest.c             | 1544 ----
 drivers/event/dlb/dlb_user.h                 |  814 ---
 drivers/event/dlb/dlb_xstats.c               | 1212 ---
 drivers/event/dlb/meson.build                |   22 -
 drivers/event/dlb/pf/base/dlb_hw_types.h     |  334 -
 drivers/event/dlb/pf/base/dlb_osdep.h        |  310 -
 drivers/event/dlb/pf/base/dlb_osdep_bitmap.h |  441 --
 drivers/event/dlb/pf/base/dlb_osdep_list.h   |  131 -
 drivers/event/dlb/pf/base/dlb_osdep_types.h  |   31 -
 drivers/event/dlb/pf/base/dlb_regs.h         | 2368 ------
 drivers/event/dlb/pf/base/dlb_resource.c     | 6904 ------------------
 drivers/event/dlb/pf/base/dlb_resource.h     |  876 ---
 drivers/event/dlb/pf/dlb_main.c              |  552 --
 drivers/event/dlb/pf/dlb_main.h              |   47 -
 drivers/event/dlb/pf/dlb_pf.c                |  752 --
 drivers/event/dlb/rte_pmd_dlb.c              |   38 -
 drivers/event/dlb/rte_pmd_dlb.h              |   77 -
 drivers/event/dlb/version.map                |    9 -
 drivers/event/meson.build                    |    2 +-
 31 files changed, 4 insertions(+), 21630 deletions(-)
 delete mode 100644 doc/guides/eventdevs/dlb.rst
 delete mode 100644 drivers/event/dlb/dlb.c
 delete mode 100644 drivers/event/dlb/dlb_iface.c
 delete mode 100644 drivers/event/dlb/dlb_iface.h
 delete mode 100644 drivers/event/dlb/dlb_inline_fns.h
 delete mode 100644 drivers/event/dlb/dlb_log.h
 delete mode 100644 drivers/event/dlb/dlb_priv.h
 delete mode 100644 drivers/event/dlb/dlb_selftest.c
 delete mode 100644 drivers/event/dlb/dlb_user.h
 delete mode 100644 drivers/event/dlb/dlb_xstats.c
 delete mode 100644 drivers/event/dlb/meson.build
 delete mode 100644 drivers/event/dlb/pf/base/dlb_hw_types.h
 delete mode 100644 drivers/event/dlb/pf/base/dlb_osdep.h
 delete mode 100644 drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
 delete mode 100644 drivers/event/dlb/pf/base/dlb_osdep_list.h
 delete mode 100644 drivers/event/dlb/pf/base/dlb_osdep_types.h
 delete mode 100644 drivers/event/dlb/pf/base/dlb_regs.h
 delete mode 100644 drivers/event/dlb/pf/base/dlb_resource.c
 delete mode 100644 drivers/event/dlb/pf/base/dlb_resource.h
 delete mode 100644 drivers/event/dlb/pf/dlb_main.c
 delete mode 100644 drivers/event/dlb/pf/dlb_main.h
 delete mode 100644 drivers/event/dlb/pf/dlb_pf.c
 delete mode 100644 drivers/event/dlb/rte_pmd_dlb.c
 delete mode 100644 drivers/event/dlb/rte_pmd_dlb.h
 delete mode 100644 drivers/event/dlb/version.map
  

Comments

Jerin Jacob March 21, 2021, 9:36 a.m. UTC | #1
On 3/17/21, Timothy McDaniel <timothy.mcdaniel@intel.com> wrote:
> Removed source code, rte_config defines, and documentation.
> Removed from build.
> Updated release notes to announce removal.
>
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>

Applied to dpdk-next-net-eventdev/for-main after the following changes. Thanks

1) Removed stale dlb section in MAINTAINERS file
2) Removed stale dlb reference from app/test/test_eventdev.c
3) Updated git commit message to
event/dlb: remove dlb driver

Remove event/dlb driver from dpdk code base.
Updated release note's removal section to reflect the same.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>



> ---
>  config/rte_config.h                          |    6 -
>  doc/api/doxy-api-index.md                    |    1 -
>  doc/api/doxy-api.conf.in                     |    1 -
>  doc/guides/eventdevs/dlb.rst                 |  341 -
>  doc/guides/eventdevs/index.rst               |    1 -
>  doc/guides/rel_notes/release_21_05.rst       |    3 +
>  drivers/event/dlb/dlb.c                      | 4082 -----------
>  drivers/event/dlb/dlb_iface.c                |   79 -
>  drivers/event/dlb/dlb_iface.h                |   82 -
>  drivers/event/dlb/dlb_inline_fns.h           |   36 -
>  drivers/event/dlb/dlb_log.h                  |   25 -
>  drivers/event/dlb/dlb_priv.h                 |  513 --
>  drivers/event/dlb/dlb_selftest.c             | 1544 ----
>  drivers/event/dlb/dlb_user.h                 |  814 ---
>  drivers/event/dlb/dlb_xstats.c               | 1212 ---
>  drivers/event/dlb/meson.build                |   22 -
>  drivers/event/dlb/pf/base/dlb_hw_types.h     |  334 -
>  drivers/event/dlb/pf/base/dlb_osdep.h        |  310 -
>  drivers/event/dlb/pf/base/dlb_osdep_bitmap.h |  441 --
>  drivers/event/dlb/pf/base/dlb_osdep_list.h   |  131 -
>  drivers/event/dlb/pf/base/dlb_osdep_types.h  |   31 -
>  drivers/event/dlb/pf/base/dlb_regs.h         | 2368 ------
>  drivers/event/dlb/pf/base/dlb_resource.c     | 6904 ------------------
>  drivers/event/dlb/pf/base/dlb_resource.h     |  876 ---
>  drivers/event/dlb/pf/dlb_main.c              |  552 --
>  drivers/event/dlb/pf/dlb_main.h              |   47 -
>  drivers/event/dlb/pf/dlb_pf.c                |  752 --
>  drivers/event/dlb/rte_pmd_dlb.c              |   38 -
>  drivers/event/dlb/rte_pmd_dlb.h              |   77 -
>  drivers/event/dlb/version.map                |    9 -
>  drivers/event/meson.build                    |    2 +-
>  31 files changed, 4 insertions(+), 21630 deletions(-)
>  delete mode 100644 doc/guides/eventdevs/dlb.rst
>  delete mode 100644 drivers/event/dlb/dlb.c
>  delete mode 100644 drivers/event/dlb/dlb_iface.c
>  delete mode 100644 drivers/event/dlb/dlb_iface.h
>  delete mode 100644 drivers/event/dlb/dlb_inline_fns.h
>  delete mode 100644 drivers/event/dlb/dlb_log.h
>  delete mode 100644 drivers/event/dlb/dlb_priv.h
>  delete mode 100644 drivers/event/dlb/dlb_selftest.c
>  delete mode 100644 drivers/event/dlb/dlb_user.h
>  delete mode 100644 drivers/event/dlb/dlb_xstats.c
>  delete mode 100644 drivers/event/dlb/meson.build
>  delete mode 100644 drivers/event/dlb/pf/base/dlb_hw_types.h
>  delete mode 100644 drivers/event/dlb/pf/base/dlb_osdep.h
>  delete mode 100644 drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
>  delete mode 100644 drivers/event/dlb/pf/base/dlb_osdep_list.h
>  delete mode 100644 drivers/event/dlb/pf/base/dlb_osdep_types.h
>  delete mode 100644 drivers/event/dlb/pf/base/dlb_regs.h
>  delete mode 100644 drivers/event/dlb/pf/base/dlb_resource.c
>  delete mode 100644 drivers/event/dlb/pf/base/dlb_resource.h
>  delete mode 100644 drivers/event/dlb/pf/dlb_main.c
>  delete mode 100644 drivers/event/dlb/pf/dlb_main.h
>  delete mode 100644 drivers/event/dlb/pf/dlb_pf.c
>  delete mode 100644 drivers/event/dlb/rte_pmd_dlb.c
>  delete mode 100644 drivers/event/dlb/rte_pmd_dlb.h
>  delete mode 100644 drivers/event/dlb/version.map
>
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 55a2fc50e..aedb68c42 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -138,12 +138,6 @@
>  /* QEDE PMD defines */
>  #define RTE_LIBRTE_QEDE_FW ""
>
> -/* DLB PMD defines */
> -#define RTE_LIBRTE_PMD_DLB_POLL_INTERVAL 1000
> -#define RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE  0
> -#undef RTE_LIBRTE_PMD_DLB_QUELL_STATS
> -#define RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA 32
> -
>  /* DLB2 defines */
>  #define RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL 1000
>  #define RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE  0
> diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> index 748514e24..38376149c 100644
> --- a/doc/api/doxy-api-index.md
> +++ b/doc/api/doxy-api-index.md
> @@ -55,7 +55,6 @@ The public API headers are grouped by topics:
>    [dpaa2_cmdif]        (@ref rte_pmd_dpaa2_cmdif.h),
>    [dpaa2_qdma]         (@ref rte_pmd_dpaa2_qdma.h),
>    [crypto_scheduler]   (@ref rte_cryptodev_scheduler.h),
> -  [dlb]                (@ref rte_pmd_dlb.h),
>    [dlb2]               (@ref rte_pmd_dlb2.h)
>
>  - **memory**:
> diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
> index 5c883b613..49d1c3ac4 100644
> --- a/doc/api/doxy-api.conf.in
> +++ b/doc/api/doxy-api.conf.in
> @@ -7,7 +7,6 @@ USE_MDFILE_AS_MAINPAGE  = @TOPDIR@/doc/api/doxy-api-index.md
>  INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
>                            @TOPDIR@/drivers/bus/vdev \
>                            @TOPDIR@/drivers/crypto/scheduler \
> -                          @TOPDIR@/drivers/event/dlb \
>                            @TOPDIR@/drivers/event/dlb2 \
>                            @TOPDIR@/drivers/mempool/dpaa2 \
>                            @TOPDIR@/drivers/net/ark \
> diff --git a/doc/guides/eventdevs/dlb.rst b/doc/guides/eventdevs/dlb.rst
> deleted file mode 100644
> index d44afcdcf..000000000
> --- a/doc/guides/eventdevs/dlb.rst
> +++ /dev/null
> @@ -1,341 +0,0 @@
> -..  SPDX-License-Identifier: BSD-3-Clause
> -    Copyright(c) 2020 Intel Corporation.
> -
> -Driver for the Intel® Dynamic Load Balancer (DLB)
> -=================================================
> -
> -The DPDK dlb poll mode driver supports the Intel® Dynamic Load Balancer.
> -
> -Prerequisites
> --------------
> -
> -Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup
> -the basic DPDK environment.
> -
> -Configuration
> --------------
> -
> -The DLB PF PMD is a user-space PMD that uses VFIO to gain direct
> -device access. To use this operation mode, the PCIe PF device must be bound
> -to a DPDK-compatible VFIO driver, such as vfio-pci.
> -
> -Eventdev API Notes
> -------------------
> -
> -The DLB provides the functions of a DPDK event device; specifically, it
> -supports atomic, ordered, and parallel scheduling events from queues to ports.
> -However, the DLB hardware is not a perfect match to the eventdev API. Some DLB
> -features are abstracted by the PMD (e.g. directed ports), some are only
> -accessible as vdev command-line parameters, and certain eventdev features are
> -not supported (e.g. the event flow ID is not maintained during scheduling).
> -
> -In general the dlb PMD is designed for ease-of-use and does not require a
> -detailed understanding of the hardware, but these details are important when
> -writing high-performance code. This section describes the places where the
> -eventdev API and DLB misalign.
> -
> -Scheduling Domain Configuration
> -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> -
> -There are 32 scheduling domains in the DLB.
> -When one is configured, it allocates load-balanced and
> -directed queues, ports, credits, and other hardware resources. Some
> -resource allocations are user-controlled -- the number of queues, for example
> --- and others, like credit pools (one directed and one load-balanced pool per
> -scheduling domain), are not.
> -
> -The DLB is a closed system eventdev, and as such the ``nb_events_limit`` device
> -setup argument and the per-port ``new_event_threshold`` argument apply as
> -defined in the eventdev header file. The limit is applied to all enqueues,
> -regardless of whether it will consume a directed or load-balanced credit.
> -
> -Reconfiguration
> -~~~~~~~~~~~~~~~
> -
> -The Eventdev API allows one to reconfigure a device, its ports, and its queues
> -by first stopping the device, calling the configuration function(s), then
> -restarting the device. The DLB does not support configuring an individual queue
> -or port without first reconfiguring the entire device, however, so there are
> -certain reconfiguration sequences that are valid in the eventdev API but not
> -supported by the PMD.
> -
> -Specifically, the PMD supports the following configuration sequence:
> -1. Configure and start the device
> -2. Stop the device
> -3. (Optional) Reconfigure the device
> -4. (Optional) If step 3 is run:
> -
> -   a. Setup queue(s). The reconfigured queue(s) lose their previous port links.
> -   b. The reconfigured port(s) lose their previous queue links.
> -
> -5. (Optional, only if steps 4a and 4b are run) Link port(s) to queue(s)
> -6. Restart the device. If the device is reconfigured in step 3 but one or more
> -   of its ports or queues are not, the PMD will apply their previous
> -   configuration (including port->queue links) at this time.
> -
> -The PMD does not support the following configuration sequences:
> -1. Configure and start the device
> -2. Stop the device
> -3. Setup queue or setup port
> -4. Start the device
> -
> -This sequence is not supported because the event device must be reconfigured
> -before its ports or queues can be.
> -
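For reference, the supported sequence above maps onto the eventdev API roughly
as in the sketch below. This is an illustration only (single port, single
queue, placeholder dev_id, error handling trimmed), not code from the removed
driver.

#include <rte_eventdev.h>

/* Sketch: reconfigure an already-started eventdev per the supported sequence. */
static int
reconfigure_eventdev(uint8_t dev_id, const struct rte_event_dev_config *new_cfg)
{
	struct rte_event_queue_conf qconf;
	struct rte_event_port_conf pconf;
	uint8_t queue_id = 0, port_id = 0;

	rte_event_dev_stop(dev_id);                     /* step 2 */

	if (rte_event_dev_configure(dev_id, new_cfg))   /* step 3 */
		return -1;

	/* Step 4: reconfigured queues and ports lose their previous links. */
	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
	if (rte_event_queue_setup(dev_id, queue_id, &qconf))
		return -1;

	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
	if (rte_event_port_setup(dev_id, port_id, &pconf))
		return -1;

	/* Step 5: re-link, then step 6: restart. */
	if (rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1) != 1)
		return -1;

	return rte_event_dev_start(dev_id);
}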
> -Load-Balanced Queues
> -~~~~~~~~~~~~~~~~~~~~
> -
> -A load-balanced queue can support atomic and ordered scheduling, or atomic and
> -unordered scheduling, but not atomic and unordered and ordered scheduling. A
> -queue's scheduling types are controlled by the event queue configuration.
> -
> -If the user sets the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag, the
> -``nb_atomic_order_sequences`` determines the supported scheduling types.
> -With non-zero ``nb_atomic_order_sequences``, the queue is configured for atomic
> -and ordered scheduling. In this case, ``RTE_SCHED_TYPE_PARALLEL`` scheduling is
> -supported by scheduling those events as ordered events.  Note that when the
> -event is dequeued, its sched_type will be ``RTE_SCHED_TYPE_ORDERED``. Else if
> -``nb_atomic_order_sequences`` is zero, the queue is configured for atomic and
> -unordered scheduling. In this case, ``RTE_SCHED_TYPE_ORDERED`` is unsupported.
> -
> -If the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag is not set, schedule_type
> -dictates the queue's scheduling type.
> -
> -The ``nb_atomic_order_sequences`` queue configuration field sets the ordered
> -queue's reorder buffer size.  DLB has 4 groups of ordered queues, where each
> -group is configured to contain either 1 queue with 1024 reorder entries, 2
> -queues with 512 reorder entries, and so on down to 32 queues with 32 entries.
> -
> -When a load-balanced queue is created, the PMD will configure a new sequence
> -number group on-demand if num_sequence_numbers does not match a pre-existing
> -group with available reorder buffer entries. If all sequence number groups are
> -in use, no new group will be created and queue configuration will fail. (Note
> -that when the PMD is used with a virtual DLB device, it cannot change the
> -sequence number configuration.)
> -
> -The queue's ``nb_atomic_flows`` parameter is ignored by the DLB PMD, because
> -the DLB does not limit the number of flows a queue can track. In the DLB, all
> -load-balanced queues can use the full 16-bit flow ID range.
> -
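The queue configuration described above can be illustrated with a short
sketch (the values and IDs are arbitrary assumptions, not taken from the
driver):

#include <rte_eventdev.h>

/* Sketch: an "all types" queue. A non-zero nb_atomic_order_sequences selects
 * atomic + ordered scheduling; zero would select atomic + unordered.
 */
static int
setup_all_types_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf qconf = {
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
		.nb_atomic_flows = 1024,          /* ignored by the DLB PMD */
		.nb_atomic_order_sequences = 64,  /* reorder buffer size */
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}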
> -Load-balanced and Directed Ports
> -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> -
> -DLB ports come in two flavors: load-balanced and directed. The eventdev API
> -does not have the same concept, but it has a similar one: ports and queues that
> -are singly-linked (i.e. linked to a single queue or port, respectively).
> -
> -The ``rte_event_dev_info_get()`` function reports the number of available
> -event ports and queues (among other things). For the DLB PMD, max_event_ports
> -and max_event_queues report the number of available load-balanced ports and
> -queues, and max_single_link_event_port_queue_pairs reports the number of
> -available directed ports and queues.
> -
> -When a scheduling domain is created in ``rte_event_dev_configure()``, the user
> -specifies ``nb_event_ports`` and ``nb_single_link_event_port_queues``, which
> -control the total number of ports (load-balanced and directed) and the number
> -of directed ports. Hence, the number of requested load-balanced ports is
> -``nb_event_ports - nb_single_link_event_ports``. The ``nb_event_queues`` field
> -specifies the total number of queues (load-balanced and directed). The number
> -of directed queues comes from ``nb_single_link_event_port_queues``, since
> -directed ports and queues come in pairs.
> -
> -When a port is setup, the ``RTE_EVENT_PORT_CFG_SINGLE_LINK`` flag determines
> -whether it should be configured as a directed (the flag is set) or a
> -load-balanced (the flag is unset) port. Similarly, the
> -``RTE_EVENT_QUEUE_CFG_SINGLE_LINK`` queue configuration flag controls
> -whether it is a directed or load-balanced queue.
> -
> -Load-balanced ports can only be linked to load-balanced queues, and directed
> -ports can only be linked to directed queues. Furthermore, directed ports can
> -only be linked to a single directed queue (and vice versa), and that link
> -cannot change after the eventdev is started.
> -
> -The eventdev API does not have a directed scheduling type. To support directed
> -traffic, the dlb PMD detects when an event is being sent to a directed queue
> -and overrides its scheduling type. Note that the originally selected scheduling
> -type (atomic, ordered, or parallel) is not preserved, and an event's sched_type
> -will be set to ``RTE_SCHED_TYPE_ATOMIC`` when it is dequeued from a directed
> -port.
> -
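As a rough sketch of how these counts and flags fit together (hypothetical
numbers, not from the patch): a device with four ports and four queues, one
of each directed, could be configured as follows.

#include <rte_eventdev.h>

/* Sketch: 3 load-balanced + 1 directed port, 3 load-balanced + 1 directed
 * queue. Queue 3 and port 3 form the directed (single-link) pair.
 */
static int
configure_dlb_like_dev(uint8_t dev_id)
{
	struct rte_event_dev_config cfg = {
		.nb_event_queues = 4,
		.nb_event_ports = 4,
		.nb_single_link_event_port_queues = 1,
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 32,
		.nb_event_port_enqueue_depth = 32,
	};
	struct rte_event_queue_conf qconf = {
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};
	struct rte_event_port_conf pconf;

	if (rte_event_dev_configure(dev_id, &cfg))
		return -1;

	if (rte_event_queue_setup(dev_id, 3, &qconf))
		return -1;

	rte_event_port_default_conf_get(dev_id, 3, &pconf);
	pconf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
	return rte_event_port_setup(dev_id, 3, &pconf);
}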
> -Flow ID
> -~~~~~~~
> -
> -The flow ID field is not preserved in the event when it is scheduled in the
> -DLB, because the DLB hardware control word format does not have sufficient
> -space to preserve every event field. As a result, the flow ID specified with
> -the enqueued event will not be in the dequeued event. If this field is
> -required, the application should pass it through an out-of-band path (for
> -example in the mbuf's udata64 field, if the event points to an mbuf) or
> -reconstruct the flow ID after receiving the event.
> -
> -Also, the DLB hardware control word supports a 16-bit flow ID. Since struct
> -rte_event's flow_id field is 20 bits, the DLB PMD drops the most significant
> -four bits from the event's flow ID.
> -
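If the flow ID must survive scheduling, one workaround consistent with the
text above is to carry a copy in the payload the event points to. A
hypothetical sketch (the struct and field names are illustrative only):

#include <stdint.h>
#include <rte_eventdev.h>

/* Hypothetical application object carried via ev->event_ptr. */
struct app_msg {
	uint32_t flow_id;   /* stashed copy; ev->flow_id is not preserved */
	/* ... payload ... */
};

static inline void
stash_flow_id(struct rte_event *ev)
{
	struct app_msg *msg = ev->event_ptr;

	/* DLB keeps only the low 16 bits in hardware; keep the full value here. */
	msg->flow_id = ev->flow_id;
}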
> -Hardware Credits
> -~~~~~~~~~~~~~~~~
> -
> -DLB uses a hardware credit scheme to prevent software from overflowing hardware
> -event storage, with each unit of storage represented by a credit. A port spends
> -a credit to enqueue an event, and hardware refills the ports with credits as the
> -events are scheduled to ports. Refills come from credit pools, and each port is
> -a member of a load-balanced credit pool and a directed credit pool. The
> -load-balanced credits are used to enqueue to load-balanced queues, and directed
> -credits are used for directed queues.
> -
> -A DLB eventdev contains one load-balanced and one directed credit pool. These
> -pools' sizes are controlled by the nb_events_limit field in struct
> -rte_event_dev_config. The load-balanced pool is sized to contain
> -nb_events_limit credits, and the directed pool is sized to contain
> -nb_events_limit/4 credits. The directed pool size can be overridden with the
> -num_dir_credits vdev argument, like so:
> -
> -    .. code-block:: console
> -
> -       --vdev=dlb1_event,num_dir_credits=<value>
> -
> -This can be used if the default allocation is too low or too high for the
> -specific application needs. The PMD also supports a vdev arg that limits the
> -max_num_events reported by rte_event_dev_info_get():
> -
> -    .. code-block:: console
> -
> -       --vdev=dlb1_event,max_num_events=<value>
> -
> -By default, max_num_events is reported as the total available load-balanced
> -credits. If multiple DLB-based applications are being used, it may be desirable
> -to control how many load-balanced credits each application uses, particularly
> -when application(s) are written to configure nb_events_limit equal to the
> -reported max_num_events.
> -
> -Each port is a member of both credit pools. A port's credit allocation is
> -defined by its low watermark, high watermark, and refill quanta. These three
> -parameters are calculated by the dlb PMD like so:
> -
> -- The load-balanced high watermark is set to the port's enqueue_depth.
> -  The directed high watermark is set to the minimum of the enqueue_depth and
> -  the directed pool size divided by the total number of ports.
> -- The refill quanta is set to half the high watermark.
> -- The low watermark is set to the minimum of 16 and the refill quanta.
> -
> -When the eventdev is started, each port is pre-allocated a high watermark's
> -worth of credits. For example, if an eventdev contains four ports with enqueue
> -depths of 32 and a load-balanced credit pool size of 4096, each port will start
> -with 32 load-balanced credits, and there will be 3968 credits available to
> -replenish the ports. Thus, a single port is not capable of enqueueing up to the
> -nb_events_limit (without any events being dequeued), since the other ports are
> -retaining their initial credit allocation; in short, all ports must enqueue in
> -order to reach the limit.
> -
> -If a port attempts to enqueue and has no credits available, the enqueue
> -operation will fail and the application must retry the enqueue. Credits are
> -replenished asynchronously by the DLB hardware.
> -
> -Software Credits
> -~~~~~~~~~~~~~~~~
> -
> -The DLB is a "closed system" event dev, and the DLB PMD layers a software
> -credit scheme on top of the hardware credit scheme in order to comply with
> -the per-port backpressure described in the eventdev API.
> -
> -The DLB's hardware scheme is local to a queue/pipeline stage: a port spends a
> -credit when it enqueues to a queue, and credits are later replenished after the
> -events are dequeued and released.
> -
> -In the software credit scheme, a credit is consumed when a new (.op =
> -RTE_EVENT_OP_NEW) event is injected into the system, and the credit is
> -replenished when the event is released from the system (either explicitly with
> -RTE_EVENT_OP_RELEASE or implicitly in dequeue_burst()).
> -
> -In this model, an event is "in the system" from its first enqueue into eventdev
> -until it is last dequeued. If the event goes through multiple event queues, it
> -is still considered "in the system" while a worker thread is processing it.
> -
> -A port will fail to enqueue if the number of events in the system exceeds its
> -``new_event_threshold`` (specified at port setup time). A port will also fail
> -to enqueue if it lacks enough hardware credits to enqueue; load-balanced
> -credits are used to enqueue to a load-balanced queue, and directed credits are
> -used to enqueue to a directed queue.
> -
> -The out-of-credit situations are typically transient, and an eventdev
> -application using the DLB ought to retry its enqueues if they fail.
> -If enqueue fails, DLB PMD sets rte_errno as follows:
> -
> -- -ENOSPC: Credit exhaustion (either hardware or software)
> -- -EINVAL: Invalid argument, such as port ID, queue ID, or sched_type.
> -
> -Depending on the pipeline the application has constructed, it's possible to
> -enter a credit deadlock scenario wherein the worker thread lacks the credit
> -to enqueue an event, and it must dequeue an event before it can recover the
> -credit. If the worker thread retries its enqueue indefinitely, it will not
> -make forward progress. Such deadlock is possible if the application has event
> -"loops", in which an event is dequeued from queue A and later enqueued back to
> -queue A.
> -
> -Due to this, workers should stop retrying after a time, release the events it
> -is attempting to enqueue, and dequeue more events. It is important that the
> -worker release the events and don't simply set them aside to retry the enqueue
> -again later, because the port has limited history list size (by default, twice
> -the port's dequeue_depth).
> -
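A worker loop following that advice could look roughly like the sketch below
(the retry limit is an arbitrary assumption and dev_id/port_id are
placeholders):

#include <errno.h>
#include <rte_eventdev.h>
#include <rte_errno.h>

#define ENQ_RETRY_LIMIT 1000

/* Sketch: retry a failed enqueue a bounded number of times, then release the
 * remaining events instead of holding them, so the port's history list drains.
 */
static void
forward_or_release(uint8_t dev_id, uint8_t port_id,
		   struct rte_event *ev, uint16_t nb)
{
	uint16_t sent = 0, i;
	int retries = 0;

	do {
		sent += rte_event_enqueue_burst(dev_id, port_id,
						&ev[sent], nb - sent);
		if (sent == nb)
			return;
		/* rte_errno == -ENOSPC indicates credit exhaustion (see above). */
	} while (rte_errno == -ENOSPC && ++retries < ENQ_RETRY_LIMIT);

	/* Give up on forwarding: release the rest rather than setting them aside. */
	for (i = sent; i < nb; i++)
		ev[i].op = RTE_EVENT_OP_RELEASE;
	while (sent < nb)
		sent += rte_event_enqueue_burst(dev_id, port_id,
						&ev[sent], nb - sent);
}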
> -Priority
> -~~~~~~~~
> -
> -The DLB supports event priority and per-port queue service priority, as
> -described in the eventdev header file. The DLB does not support 'global' event
> -queue priority established at queue creation time.
> -
> -DLB supports 8 event and queue service priority levels. For both priority
> -types, the PMD uses the upper three bits of the priority field to determine the
> -DLB priority, discarding the 5 least significant bits. The 5 least significant
> -event priority bits are not preserved when an event is enqueued.
> -
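In other words, only the top three bits of an event's priority field reach
the hardware. A tiny sketch of the effective mapping implied by the text
above (an assumption, not driver code):

#include <stdint.h>

/* 0..255 eventdev priority -> 0..7 DLB priority level. */
static inline uint8_t
dlb_effective_priority(uint8_t ev_priority)
{
	return ev_priority >> 5;
}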
> -Atomic Inflights Allocation
> -~~~~~~~~~~~~~~~~~~~~~~~~~~~
> -
> -In the last stage prior to scheduling an atomic event to a CQ, DLB holds the
> -inflight event in a temporary buffer that is divided among load-balanced
> -queues. If a queue's atomic buffer storage fills up, this can result in
> -head-of-line-blocking. For example:
> -
> -- An LDB queue allocated N atomic buffer entries
> -- All N entries are filled with events from flow X, which is pinned to CQ 0.
> -
> -Until CQ 0 releases 1+ events, no other atomic flows for that LDB queue can be
> -scheduled. The likelihood of this case depends on the eventdev configuration,
> -traffic behavior, event processing latency, potential for a worker to be
> -interrupted or otherwise delayed, etc.
> -
> -By default, the PMD allocates 16 buffer entries for each load-balanced queue,
> -which provides an even division across all 128 queues but potentially wastes
> -buffer space (e.g. if not all queues are used, or aren't used for atomic
> -scheduling).
> -
> -The PMD provides a dev arg to override the default per-queue allocation. To
> -increase a vdev's per-queue atomic-inflight allocation to (for example) 64:
> -
> -    .. code-block:: console
> -
> -       --vdev=dlb1_event,atm_inflights=64
> -
> -Deferred Scheduling
> -~~~~~~~~~~~~~~~~~~~
> -
> -The DLB PMD's default behavior for managing a CQ is to "pop" the CQ once per
> -dequeued event before returning from rte_event_dequeue_burst(). This frees the
> -corresponding entries in the CQ, which enables the DLB to schedule more events
> -to it.
> -
> -To support applications seeking finer-grained scheduling control -- for example
> -deferring scheduling to get the best possible priority scheduling and
> -load-balancing -- the PMD supports a deferred scheduling mode. In this mode,
> -the CQ entry is not popped until the *subsequent* rte_event_dequeue_burst()
> -call. This mode only applies to load-balanced event ports with dequeue depth of
> -1.
> -
> -To enable deferred scheduling, use the defer_sched vdev argument like so:
> -
> -    .. code-block:: console
> -
> -       --vdev=dlb1_event,defer_sched=on
> -
> diff --git a/doc/guides/eventdevs/index.rst b/doc/guides/eventdevs/index.rst
> index f5b69b39d..738788d9e 100644
> --- a/doc/guides/eventdevs/index.rst
> +++ b/doc/guides/eventdevs/index.rst
> @@ -11,7 +11,6 @@ application through the eventdev API.
>      :maxdepth: 2
>      :numbered:
>
> -    dlb
>      dlb2
>      dpaa
>      dpaa2
> diff --git a/doc/guides/rel_notes/release_21_05.rst b/doc/guides/rel_notes/release_21_05.rst
> index 5aa9ed7db..07d12a3af 100644
> --- a/doc/guides/rel_notes/release_21_05.rst
> +++ b/doc/guides/rel_notes/release_21_05.rst
> @@ -68,6 +68,9 @@ Removed Items
>     Also, make sure to start the actual text at the margin.
>     =======================================================
>
> +* Removed support for DLB V1 hardware.  This is not a broad market device,
> +  and existing customers already obtain the source code directly from Intel.
> +
>
>  API Changes
>  -----------
> diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
> deleted file mode 100644
> index 8b26d1d2d..000000000
> --- a/drivers/event/dlb/dlb.c
> +++ /dev/null
> @@ -1,4082 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#include <assert.h>
> -#include <errno.h>
> -#include <nmmintrin.h>
> -#include <pthread.h>
> -#include <stdbool.h>
> -#include <stdint.h>
> -#include <stdio.h>
> -#include <string.h>
> -#include <sys/fcntl.h>
> -#include <sys/mman.h>
> -#include <unistd.h>
> -
> -#include <rte_common.h>
> -#include <rte_config.h>
> -#include <rte_cycles.h>
> -#include <rte_debug.h>
> -#include <rte_dev.h>
> -#include <rte_errno.h>
> -#include <rte_io.h>
> -#include <rte_kvargs.h>
> -#include <rte_log.h>
> -#include <rte_malloc.h>
> -#include <rte_mbuf.h>
> -#include <rte_power_intrinsics.h>
> -#include <rte_prefetch.h>
> -#include <rte_ring.h>
> -#include <rte_string_fns.h>
> -
> -#include <rte_eventdev.h>
> -#include <eventdev_pmd.h>
> -
> -#include "dlb_priv.h"
> -#include "dlb_iface.h"
> -#include "dlb_inline_fns.h"
> -
> -/*
> - * Resources exposed to eventdev.
> - */
> -#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
> -#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member
> max_event_queues"
> -#endif
> -static struct rte_event_dev_info evdev_dlb_default_info = {
> -	.driver_name = "", /* probe will set */
> -	.min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
> -	.max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
> -#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
> -	.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
> -#else
> -	.max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
> -#endif
> -	.max_event_queue_flows = DLB_MAX_NUM_FLOWS,
> -	.max_event_queue_priority_levels = DLB_QID_PRIORITIES,
> -	.max_event_priority_levels = DLB_QID_PRIORITIES,
> -	.max_event_ports = DLB_MAX_NUM_LDB_PORTS,
> -	.max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
> -	.max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
> -	.max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
> -	.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
> -	.max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
> -	.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
> -			  RTE_EVENT_DEV_CAP_EVENT_QOS |
> -			  RTE_EVENT_DEV_CAP_BURST_MODE |
> -			  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
> -			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
> -			  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
> -};
> -
> -struct process_local_port_data
> -dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
> -
> -static inline uint16_t
> -dlb_event_enqueue_delayed(void *event_port,
> -			  const struct rte_event events[]);
> -
> -static inline uint16_t
> -dlb_event_enqueue_burst_delayed(void *event_port,
> -				const struct rte_event events[],
> -				uint16_t num);
> -
> -static inline uint16_t
> -dlb_event_enqueue_new_burst_delayed(void *event_port,
> -				    const struct rte_event events[],
> -				    uint16_t num);
> -
> -static inline uint16_t
> -dlb_event_enqueue_forward_burst_delayed(void *event_port,
> -					const struct rte_event events[],
> -					uint16_t num);
> -
> -static int
> -dlb_hw_query_resources(struct dlb_eventdev *dlb)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_hw_resource_info *dlb_info = &handle->info;
> -	int ret;
> -
> -	ret = dlb_iface_get_num_resources(handle,
> -					  &dlb->hw_rsrc_query_results);
> -	if (ret) {
> -		DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
> -		return ret;
> -	}
> -
> -	/* Complete filling in device resource info returned to evdev app,
> -	 * overriding any default values.
> -	 * The capabilities (CAPs) were set at compile time.
> -	 */
> -
> -	evdev_dlb_default_info.max_event_queues =
> -		dlb->hw_rsrc_query_results.num_ldb_queues;
> -
> -	evdev_dlb_default_info.max_event_ports =
> -		dlb->hw_rsrc_query_results.num_ldb_ports;
> -
> -	evdev_dlb_default_info.max_num_events =
> -		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
> -
> -	/* Save off values used when creating the scheduling domain. */
> -
> -	handle->info.num_sched_domains =
> -		dlb->hw_rsrc_query_results.num_sched_domains;
> -
> -	handle->info.hw_rsrc_max.nb_events_limit =
> -		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
> -
> -	handle->info.hw_rsrc_max.num_queues =
> -		dlb->hw_rsrc_query_results.num_ldb_queues +
> -		dlb->hw_rsrc_query_results.num_dir_ports;
> -
> -	handle->info.hw_rsrc_max.num_ldb_queues =
> -		dlb->hw_rsrc_query_results.num_ldb_queues;
> -
> -	handle->info.hw_rsrc_max.num_ldb_ports =
> -		dlb->hw_rsrc_query_results.num_ldb_ports;
> -
> -	handle->info.hw_rsrc_max.num_dir_ports =
> -		dlb->hw_rsrc_query_results.num_dir_ports;
> -
> -	handle->info.hw_rsrc_max.reorder_window_size =
> -		dlb->hw_rsrc_query_results.num_hist_list_entries;
> -
> -	rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
> -
> -	return 0;
> -}
> -
> -static void
> -dlb_free_qe_mem(struct dlb_port *qm_port)
> -{
> -	if (qm_port == NULL)
> -		return;
> -
> -	rte_free(qm_port->qe4);
> -	qm_port->qe4 = NULL;
> -
> -	rte_free(qm_port->consume_qe);
> -	qm_port->consume_qe = NULL;
> -
> -	rte_memzone_free(dlb_port[qm_port->id][PORT_TYPE(qm_port)].mz);
> -	dlb_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
> -}
> -
> -static int
> -dlb_init_consume_qe(struct dlb_port *qm_port, char *mz_name)
> -{
> -	struct dlb_cq_pop_qe *qe;
> -
> -	qe = rte_zmalloc(mz_name,
> -			DLB_NUM_QES_PER_CACHE_LINE *
> -				sizeof(struct dlb_cq_pop_qe),
> -			RTE_CACHE_LINE_SIZE);
> -
> -	if (qe == NULL)	{
> -		DLB_LOG_ERR("dlb: no memory for consume_qe\n");
> -		return -ENOMEM;
> -	}
> -
> -	qm_port->consume_qe = qe;
> -
> -	qe->qe_valid = 0;
> -	qe->qe_frag = 0;
> -	qe->qe_comp = 0;
> -	qe->cq_token = 1;
> -	/* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
> -	 * and so on.
> -	 */
> -	qe->tokens = 0;	/* set at run time */
> -	qe->meas_lat = 0;
> -	qe->no_dec = 0;
> -	/* Completion IDs are disabled */
> -	qe->cmp_id = 0;
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
> -{
> -	int ret, sz;
> -
> -	sz = DLB_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb_enqueue_qe);
> -
> -	qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
> -
> -	if (qm_port->qe4 == NULL) {
> -		DLB_LOG_ERR("dlb: no qe4 memory\n");
> -		ret = -ENOMEM;
> -		goto error_exit;
> -	}
> -
> -	ret = dlb_init_consume_qe(qm_port, mz_name);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: dlb_init_consume_qe ret=%d\n", ret);
> -		goto error_exit;
> -	}
> -
> -	return 0;
> -
> -error_exit:
> -
> -	dlb_free_qe_mem(qm_port);
> -
> -	return ret;
> -}
> -
> -/* Wrapper for string to int conversion. Substituted for atoi(...), which is
> - * unsafe.
> - */
> -#define DLB_BASE_10 10
> -
> -static int
> -dlb_string_to_int(int *result, const char *str)
> -{
> -	long ret;
> -	char *endstr;
> -
> -	if (str == NULL || result == NULL)
> -		return -EINVAL;
> -
> -	errno = 0;
> -	ret = strtol(str, &endstr, DLB_BASE_10);
> -	if (errno)
> -		return -errno;
> -
> -	/* long int and int may be different width for some architectures */
> -	if (ret < INT_MIN || ret > INT_MAX || endstr == str)
> -		return -EINVAL;
> -
> -	*result = ret;
> -	return 0;
> -}
> -
> -static int
> -set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
> -{
> -	int *socket_id = opaque;
> -	int ret;
> -
> -	ret = dlb_string_to_int(socket_id, value);
> -	if (ret < 0)
> -		return ret;
> -
> -	if (*socket_id > RTE_MAX_NUMA_NODES)
> -		return -EINVAL;
> -
> -	return 0;
> -}
> -
> -static int
> -set_max_num_events(const char *key __rte_unused,
> -		   const char *value,
> -		   void *opaque)
> -{
> -	int *max_num_events = opaque;
> -	int ret;
> -
> -	if (value == NULL || opaque == NULL) {
> -		DLB_LOG_ERR("NULL pointer\n");
> -		return -EINVAL;
> -	}
> -
> -	ret = dlb_string_to_int(max_num_events, value);
> -	if (ret < 0)
> -		return ret;
> -
> -	if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
> -		DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
> -			    DLB_MAX_NUM_LDB_CREDITS);
> -		return -EINVAL;
> -	}
> -
> -	return 0;
> -}
> -
> -static int
> -set_num_dir_credits(const char *key __rte_unused,
> -		    const char *value,
> -		    void *opaque)
> -{
> -	int *num_dir_credits = opaque;
> -	int ret;
> -
> -	if (value == NULL || opaque == NULL) {
> -		DLB_LOG_ERR("NULL pointer\n");
> -		return -EINVAL;
> -	}
> -
> -	ret = dlb_string_to_int(num_dir_credits, value);
> -	if (ret < 0)
> -		return ret;
> -
> -	if (*num_dir_credits < 0 ||
> -	    *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
> -		DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
> -			    DLB_MAX_NUM_DIR_CREDITS);
> -		return -EINVAL;
> -	}
> -	return 0;
> -}
> -
> -/* VDEV-only notes:
> - * This function first unmaps all memory mappings and closes the
> - * domain's file descriptor, which causes the driver to reset the
> - * scheduling domain. Once that completes (when close() returns), we
> - * can safely free the dynamically allocated memory used by the
> - * scheduling domain.
> - *
> - * PF-only notes:
> - * We will maintain a use count and use that to determine when
> - * a reset is required.  In PF mode, we never mmap, or munmap
> - * device memory,  and we own the entire physical PCI device.
> - */
> -
> -static void
> -dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	enum dlb_configuration_state config_state;
> -	int i, j;
> -
> -	/* Close and reset the domain */
> -	dlb_iface_domain_close(dlb);
> -
> -	/* Free all dynamically allocated port memory */
> -	for (i = 0; i < dlb->num_ports; i++)
> -		dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);
> -
> -	/* If reconfiguring, mark the device's queues and ports as "previously
> -	 * configured." If the user does not reconfigure them, the PMD will
> -	 * reapply their previous configuration when the device is started.
> -	 */
> -	config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;
> -
> -	for (i = 0; i < dlb->num_ports; i++) {
> -		dlb->ev_ports[i].qm_port.config_state = config_state;
> -		/* Reset setup_done so ports can be reconfigured */
> -		dlb->ev_ports[i].setup_done = false;
> -		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
> -			dlb->ev_ports[i].link[j].mapped = false;
> -	}
> -
> -	for (i = 0; i < dlb->num_queues; i++)
> -		dlb->ev_queues[i].qm_queue.config_state = config_state;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
> -		dlb->ev_queues[i].setup_done = false;
> -
> -	dlb->num_ports = 0;
> -	dlb->num_ldb_ports = 0;
> -	dlb->num_dir_ports = 0;
> -	dlb->num_queues = 0;
> -	dlb->num_ldb_queues = 0;
> -	dlb->num_dir_queues = 0;
> -	dlb->configured = false;
> -}
> -
> -static int
> -dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
> -{
> -	struct dlb_create_ldb_pool_args cfg;
> -	struct dlb_cmd_response response;
> -	int ret;
> -
> -	if (handle == NULL)
> -		return -EINVAL;
> -
> -	if (!handle->cfg.resources.num_ldb_credits) {
> -		handle->cfg.ldb_credit_pool_id = 0;
> -		handle->cfg.num_ldb_credits = 0;
> -		return 0;
> -	}
> -
> -	cfg.response = (uintptr_t)&response;
> -	cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;
> -
> -	ret = dlb_iface_ldb_credit_pool_create(handle,
> -					       &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -	}
> -
> -	handle->cfg.ldb_credit_pool_id = response.id;
> -	handle->cfg.num_ldb_credits = cfg.num_ldb_credits;
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
> -{
> -	struct dlb_create_dir_pool_args cfg;
> -	struct dlb_cmd_response response;
> -	int ret;
> -
> -	if (handle == NULL)
> -		return -EINVAL;
> -
> -	if (!handle->cfg.resources.num_dir_credits) {
> -		handle->cfg.dir_credit_pool_id = 0;
> -		handle->cfg.num_dir_credits = 0;
> -		return 0;
> -	}
> -
> -	cfg.response = (uintptr_t)&response;
> -	cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;
> -
> -	ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
> -	if (ret < 0)
> -		DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -
> -	handle->cfg.dir_credit_pool_id = response.id;
> -	handle->cfg.num_dir_credits = cfg.num_dir_credits;
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
> -			   struct dlb_eventdev *dlb,
> -			   const struct dlb_hw_rsrcs *resources_asked)
> -{
> -	int ret = 0;
> -	struct dlb_create_sched_domain_args *config_params;
> -	struct dlb_cmd_response response;
> -
> -	if (resources_asked == NULL) {
> -		DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
> -		ret = EINVAL;
> -		goto error_exit;
> -	}
> -
> -	/* Map generic qm resources to dlb resources */
> -	config_params = &handle->cfg.resources;
> -
> -	config_params->response = (uintptr_t)&response;
> -
> -	/* DIR ports and queues */
> -
> -	config_params->num_dir_ports =
> -		resources_asked->num_dir_ports;
> -
> -	config_params->num_dir_credits =
> -		resources_asked->num_dir_credits;
> -
> -	/* LDB ports and queues */
> -
> -	config_params->num_ldb_queues =
> -		resources_asked->num_ldb_queues;
> -
> -	config_params->num_ldb_ports =
> -		resources_asked->num_ldb_ports;
> -
> -	config_params->num_ldb_credits =
> -		resources_asked->num_ldb_credits;
> -
> -	config_params->num_atomic_inflights =
> -		dlb->num_atm_inflights_per_queue *
> -		config_params->num_ldb_queues;
> -
> -	config_params->num_hist_list_entries = config_params->num_ldb_ports *
> -		DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
> -
> -	/* dlb limited to 1 credit pool per queue type */
> -	config_params->num_ldb_credit_pools = 1;
> -	config_params->num_dir_credit_pools = 1;
> -
> -	DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_cred_pools=%d, dir-credit_pools=%d\n",
> -		    config_params->num_ldb_queues,
> -		    config_params->num_ldb_ports,
> -		    config_params->num_dir_ports,
> -		    config_params->num_atomic_inflights,
> -		    config_params->num_hist_list_entries,
> -		    config_params->num_ldb_credits,
> -		    config_params->num_dir_credits,
> -		    config_params->num_ldb_credit_pools,
> -		    config_params->num_dir_credit_pools);
> -
> -	/* Configure the QM */
> -
> -	ret = dlb_iface_sched_domain_create(handle, config_params);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
> -			    handle->device_id,
> -			    ret,
> -			    dlb_error_strings[response.status]);
> -		goto error_exit;
> -	}
> -
> -	handle->domain_id = response.id;
> -	handle->domain_id_valid = 1;
> -
> -	config_params->response = 0;
> -
> -	ret = dlb_ldb_credit_pool_create(handle);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
> -		goto error_exit2;
> -	}
> -
> -	ret = dlb_dir_credit_pool_create(handle);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: create dir credit pool failed\n");
> -		goto error_exit2;
> -	}
> -
> -	handle->cfg.configured = true;
> -
> -	return 0;
> -
> -error_exit2:
> -	dlb_iface_domain_close(dlb);
> -
> -error_exit:
> -	return ret;
> -}
> -
> -/* End HW specific */
> -static void
> -dlb_eventdev_info_get(struct rte_eventdev *dev,
> -		      struct rte_event_dev_info *dev_info)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	int ret;
> -
> -	ret = dlb_hw_query_resources(dlb);
> -	if (ret) {
> -		const struct rte_eventdev_data *data = dev->data;
> -
> -		DLB_LOG_ERR("get resources err=%d, devid=%d\n",
> -			    ret, data->dev_id);
> -		/* fn is void, so fall through and return values set up in
> -		 * probe
> -		 */
> -	}
> -
> -	/* Add num resources currently owned by this domain.
> -	 * These would become available if the scheduling domain were reset due
> -	 * to the application recalling eventdev_configure to *reconfigure* the
> -	 * domain.
> -	 */
> -	evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
> -	evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
> -	evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;
> -
> -	/* In DLB A-stepping hardware, applications are limited to 128
> -	 * configured ports (load-balanced or directed). The reported number of
> -	 * available ports must reflect this.
> -	 */
> -	if (dlb->revision < DLB_REV_B0) {
> -		int used_ports;
> -
> -		used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
> -			dlb->hw_rsrc_query_results.num_ldb_ports -
> -			dlb->hw_rsrc_query_results.num_dir_ports;
> -
> -		evdev_dlb_default_info.max_event_ports =
> -			RTE_MIN(evdev_dlb_default_info.max_event_ports,
> -				128 - used_ports);
> -	}
> -
> -	evdev_dlb_default_info.max_event_queues =
> -		RTE_MIN(evdev_dlb_default_info.max_event_queues,
> -			RTE_EVENT_MAX_QUEUES_PER_DEV);
> -
> -	evdev_dlb_default_info.max_num_events =
> -		RTE_MIN(evdev_dlb_default_info.max_num_events,
> -			dlb->max_num_events_override);
> -
> -	*dev_info = evdev_dlb_default_info;
> -}
> -
> -/* Note: 1 QM instance per QM device, QM instance/device == event device */
> -static int
> -dlb_eventdev_configure(const struct rte_eventdev *dev)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
> -	const struct rte_eventdev_data *data = dev->data;
> -	const struct rte_event_dev_config *config = &data->dev_conf;
> -	int ret;
> -
> -	/* If this eventdev is already configured, we must release the current
> -	 * scheduling domain before attempting to configure a new one.
> -	 */
> -	if (dlb->configured) {
> -		dlb_hw_reset_sched_domain(dev, true);
> -
> -		ret = dlb_hw_query_resources(dlb);
> -		if (ret) {
> -			DLB_LOG_ERR("get resources err=%d, devid=%d\n",
> -				    ret, data->dev_id);
> -			return ret;
> -		}
> -	}
> -
> -	if (config->nb_event_queues > rsrcs->num_queues) {
> -		DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
> -			    config->nb_event_queues,
> -			    rsrcs->num_queues);
> -		return -EINVAL;
> -	}
> -	if (config->nb_event_ports > (rsrcs->num_ldb_ports
> -			+ rsrcs->num_dir_ports)) {
> -		DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
> -			    config->nb_event_ports,
> -			    (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
> -		return -EINVAL;
> -	}
> -	if (config->nb_events_limit > rsrcs->nb_events_limit) {
> -		DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
> -			    config->nb_events_limit,
> -			    rsrcs->nb_events_limit);
> -		return -EINVAL;
> -	}
> -
> -	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
> -		dlb->global_dequeue_wait = false;
> -	else {
> -		uint32_t timeout32;
> -
> -		dlb->global_dequeue_wait = true;
> -
> -		timeout32 = config->dequeue_timeout_ns;
> -
> -		dlb->global_dequeue_wait_ticks =
> -			timeout32 * (rte_get_timer_hz() / 1E9);
> -	}
> -
> -	/* Does this platform support umonitor/umwait? */
> -	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
> -		if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
> -		    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
> -			DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE must be 0 or 1.\n",
> -				    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
> -			return -EINVAL;
> -		}
> -		dlb->umwait_allowed = true;
> -	}
> -
> -	rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
> -	rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
> -	/* 1 dir queue per dir port */
> -	rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
> -
> -	/* Scale down nb_events_limit by 4 for directed credits, since there
> -	 * are 4x as many load-balanced credits.
> -	 */
> -	rsrcs->num_ldb_credits = 0;
> -	rsrcs->num_dir_credits = 0;
> -
> -	if (rsrcs->num_ldb_queues)
> -		rsrcs->num_ldb_credits = config->nb_events_limit;
> -	if (rsrcs->num_dir_ports)
> -		rsrcs->num_dir_credits = config->nb_events_limit / 4;
> -	if (dlb->num_dir_credits_override != -1)
> -		rsrcs->num_dir_credits = dlb->num_dir_credits_override;
> -
> -	if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
> -		DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
> -		return -ENODEV;
> -	}
> -
> -	dlb->new_event_limit = config->nb_events_limit;
> -	__atomic_store_n(&dlb->inflights, 0, __ATOMIC_SEQ_CST);
> -
> -	/* Save number of ports/queues for this event dev */
> -	dlb->num_ports = config->nb_event_ports;
> -	dlb->num_queues = config->nb_event_queues;
> -	dlb->num_dir_ports = rsrcs->num_dir_ports;
> -	dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
> -	dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
> -	dlb->num_dir_queues = dlb->num_dir_ports;
> -	dlb->num_ldb_credits = rsrcs->num_ldb_credits;
> -	dlb->num_dir_credits = rsrcs->num_dir_credits;
> -
> -	dlb->configured = true;
> -
> -	return 0;
> -}
> -
> -static int16_t
> -dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
> -			       uint32_t qm_port_id,
> -			       uint16_t qm_qid)
> -{
> -	struct dlb_unmap_qid_args cfg;
> -	struct dlb_cmd_response response;
> -	int32_t ret;
> -
> -	if (handle == NULL)
> -		return -EINVAL;
> -
> -	cfg.response = (uintptr_t)&response;
> -	cfg.port_id = qm_port_id;
> -	cfg.qid = qm_qid;
> -
> -	ret = dlb_iface_unmap_qid(handle, &cfg);
> -	if (ret < 0)
> -		DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
> -			   struct dlb_eventdev_port *ev_port,
> -			   struct dlb_eventdev_queue *ev_queue)
> -{
> -	int ret, i;
> -
> -	/* Don't unlink until start time. */
> -	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
> -		return 0;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
> -		if (ev_port->link[i].valid &&
> -		    ev_port->link[i].queue_id == ev_queue->id)
> -			break; /* found */
> -	}
> -
> -	/* This is expected with eventdev API!
> -	 * It blindly attempts to unmap all queues.
> -	 */
> -	if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -		DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
> -			    ev_queue->qm_queue.id,
> -			    ev_port->qm_port.id);
> -		return 0;
> -	}
> -
> -	ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
> -					     ev_port->qm_port.id,
> -					     ev_queue->qm_queue.id);
> -	if (!ret)
> -		ev_port->link[i].mapped = false;
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
> -			 uint8_t queues[], uint16_t nb_unlinks)
> -{
> -	struct dlb_eventdev_port *ev_port = event_port;
> -	struct dlb_eventdev *dlb;
> -	int i;
> -
> -	RTE_SET_USED(dev);
> -
> -	if (!ev_port->setup_done) {
> -		DLB_LOG_ERR("dlb: evport %d is not configured\n",
> -			    ev_port->id);
> -		rte_errno = -EINVAL;
> -		return 0;
> -	}
> -
> -	if (queues == NULL || nb_unlinks == 0) {
> -		DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
> -		return 0; /* Ignore and return success */
> -	}
> -
> -	if (ev_port->qm_port.is_directed) {
> -		DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
> -			    ev_port->id);
> -		rte_errno = 0;
> -		return nb_unlinks; /* as if success */
> -	}
> -
> -	dlb = ev_port->dlb;
> -
> -	for (i = 0; i < nb_unlinks; i++) {
> -		struct dlb_eventdev_queue *ev_queue;
> -		int ret, j;
> -
> -		if (queues[i] >= dlb->num_queues) {
> -			DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
> -			rte_errno = -EINVAL;
> -			return i; /* return index of offending queue */
> -		}
> -
> -		ev_queue = &dlb->ev_queues[queues[i]];
> -
> -		/* Does a link exist? */
> -		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
> -			if (ev_port->link[j].queue_id == queues[i] &&
> -			    ev_port->link[j].valid)
> -				break;
> -
> -		if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
> -			continue;
> -
> -		ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
> -		if (ret) {
> -			DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
> -				    ret, ev_port->id, queues[i]);
> -			rte_errno = -ENOENT;
> -			return i; /* return index of offending queue */
> -		}
> -
> -		ev_port->link[j].valid = false;
> -		ev_port->num_links--;
> -		ev_queue->num_links--;
> -	}
> -
> -	return nb_unlinks;
> -}
> -
> -static int
> -dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
> -				      void *event_port)
> -{
> -	struct dlb_eventdev_port *ev_port = event_port;
> -	struct dlb_eventdev *dlb;
> -	struct dlb_hw_dev *handle;
> -	struct dlb_pending_port_unmaps_args cfg;
> -	struct dlb_cmd_response response;
> -	int ret;
> -
> -	RTE_SET_USED(dev);
> -
> -	if (!ev_port->setup_done) {
> -		DLB_LOG_ERR("dlb: evport %d is not configured\n",
> -			    ev_port->id);
> -		rte_errno = -EINVAL;
> -		return 0;
> -	}
> -
> -	cfg.port_id = ev_port->qm_port.id;
> -	cfg.response = (uintptr_t)&response;
> -	dlb = ev_port->dlb;
> -	handle = &dlb->qm_instance;
> -	ret = dlb_iface_pending_port_unmaps(handle, &cfg);
> -
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return ret;
> -	}
> -
> -	return response.id;
> -}
> -
> -static void
> -dlb_eventdev_port_default_conf_get(struct rte_eventdev *dev,
> -				   uint8_t port_id,
> -				   struct rte_event_port_conf *port_conf)
> -{
> -	RTE_SET_USED(port_id);
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -
> -	port_conf->new_event_threshold = dlb->new_event_limit;
> -	port_conf->dequeue_depth = 32;
> -	port_conf->enqueue_depth = DLB_MAX_ENQUEUE_DEPTH;
> -	port_conf->event_port_cfg = 0;
> -}
> -
> -static void
> -dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
> -				    uint8_t queue_id,
> -				    struct rte_event_queue_conf *queue_conf)
> -{
> -	RTE_SET_USED(dev);
> -	RTE_SET_USED(queue_id);
> -	queue_conf->nb_atomic_flows = 1024;
> -	queue_conf->nb_atomic_order_sequences = 32;
> -	queue_conf->event_queue_cfg = 0;
> -	queue_conf->priority = 0;
> -}
> -
> -static int
> -dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
> -		       struct dlb_eventdev_port *ev_port,
> -		       uint32_t dequeue_depth,
> -		       uint32_t cq_depth,
> -		       uint32_t enqueue_depth,
> -		       uint16_t rsvd_tokens,
> -		       bool use_rsvd_token_scheme)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_create_ldb_port_args cfg = {0};
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -	struct dlb_port *qm_port = NULL;
> -	char mz_name[RTE_MEMZONE_NAMESIZE];
> -	uint32_t qm_port_id;
> -
> -	if (handle == NULL)
> -		return -EINVAL;
> -
> -	if (cq_depth < DLB_MIN_LDB_CQ_DEPTH) {
> -		DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n",
> -			DLB_MIN_LDB_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH);
> -		return -EINVAL;
> -	}
> -
> -	if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
> -		DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
> -			    DLB_MIN_ENQUEUE_DEPTH);
> -		return -EINVAL;
> -	}
> -
> -	rte_spinlock_lock(&handle->resource_lock);
> -
> -	cfg.response = (uintptr_t)&response;
> -
> -	/* We round up to the next power of 2 if necessary */
> -	cfg.cq_depth = rte_align32pow2(cq_depth);
> -	cfg.cq_depth_threshold = rsvd_tokens;
> -
> -	cfg.cq_history_list_size = DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
> -
> -	/* User controls the LDB high watermark via enqueue depth. The DIR high
> -	 * watermark is equal, unless the directed credit pool is too small.
> -	 */
> -	cfg.ldb_credit_high_watermark = enqueue_depth;
> -
> -	/* If there are no directed ports, the kernel driver will ignore this
> -	 * port's directed credit settings. Don't use enqueue_depth if it would
> -	 * require more directed credits than are available.
> -	 */
> -	cfg.dir_credit_high_watermark =
> -		RTE_MIN(enqueue_depth,
> -			handle->cfg.num_dir_credits / dlb->num_ports);
> -
> -	cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
> -	cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
> -
> -	cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
> -	cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
> -
> -	/* Per QM values */
> -
> -	cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
> -	cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
> -
> -	ret = dlb_iface_ldb_port_create(handle, &cfg, dlb->poll_mode);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: dlb_ldb_port_create error, ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		goto error_exit;
> -	}
> -
> -	qm_port_id = response.id;
> -
> -	DLB_LOG_DBG("dlb: ev_port %d uses qm LB port %d <<<<<\n",
> -		    ev_port->id, qm_port_id);
> -
> -	qm_port = &ev_port->qm_port;
> -	qm_port->ev_port = ev_port; /* back ptr */
> -	qm_port->dlb = dlb; /* back ptr */
> -
> -	/*
> -	 * Allocate and init local qe struct(s).
> -	 * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
> -	 */
> -
> -	snprintf(mz_name, sizeof(mz_name), "ldb_port%d",
> -		 ev_port->id);
> -
> -	ret = dlb_init_qe_mem(qm_port, mz_name);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
> -		goto error_exit;
> -	}
> -
> -	qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
> -	qm_port->id = qm_port_id;
> -
> -	/* The credit window is one high water mark of QEs */
> -	qm_port->ldb_pushcount_at_credit_expiry = 0;
> -	qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
> -	/* The credit window is one high water mark of QEs */
> -	qm_port->dir_pushcount_at_credit_expiry = 0;
> -	qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
> -	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
> -	 * the effective depth is smaller.
> -	 */
> -	qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
> -	qm_port->cq_idx = 0;
> -	qm_port->cq_idx_unmasked = 0;
> -	if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
> -		qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
> -	else
> -		qm_port->cq_depth_mask = qm_port->cq_depth - 1;
> -
> -	qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
> -	/* starting value of gen bit - it toggles at wrap time */
> -	qm_port->gen_bit = 1;
> -
> -	qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
> -	qm_port->cq_rsvd_token_deficit = rsvd_tokens;
> -	qm_port->int_armed = false;
> -
> -	/* Save off for later use in info and lookup APIs. */
> -	qm_port->qid_mappings = &dlb->qm_ldb_to_ev_queue_id[0];
> -
> -	qm_port->dequeue_depth = dequeue_depth;
> -
> -	/* When using the reserved token scheme, token_pop_thresh is
> -	 * initially 2 * dequeue_depth. Once the tokens are reserved,
> -	 * the enqueue code re-assigns it to dequeue_depth.
> -	 */
> -	qm_port->token_pop_thresh = cq_depth;
> -
> -	/* When the deferred scheduling vdev arg is selected, use deferred pop
> -	 * for all single-entry CQs.
> -	 */
> -	if (cfg.cq_depth == 1 || (cfg.cq_depth == 2 && use_rsvd_token_scheme)) {
> -		if (dlb->defer_sched)
> -			qm_port->token_pop_mode = DEFERRED_POP;
> -	}
> -
> -	/* The default enqueue functions do not include delayed-pop support for
> -	 * performance reasons.
> -	 */
> -	if (qm_port->token_pop_mode == DELAYED_POP) {
> -		dlb->event_dev->enqueue = dlb_event_enqueue_delayed;
> -		dlb->event_dev->enqueue_burst =
> -			dlb_event_enqueue_burst_delayed;
> -		dlb->event_dev->enqueue_new_burst =
> -			dlb_event_enqueue_new_burst_delayed;
> -		dlb->event_dev->enqueue_forward_burst =
> -			dlb_event_enqueue_forward_burst_delayed;
> -	}
> -
> -	qm_port->owed_tokens = 0;
> -	qm_port->issued_releases = 0;
> -
> -	/* update state */
> -	qm_port->state = PORT_STARTED; /* enabled at create time */
> -	qm_port->config_state = DLB_CONFIGURED;
> -
> -	qm_port->dir_credits = cfg.dir_credit_high_watermark;
> -	qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
> -
> -	DLB_LOG_DBG("dlb: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
> -		    qm_port_id,
> -		    cq_depth,
> -		    qm_port->ldb_credits,
> -		    qm_port->dir_credits);
> -
> -	rte_spinlock_unlock(&handle->resource_lock);
> -
> -	return 0;
> -
> -error_exit:
> -	if (qm_port) {
> -		dlb_free_qe_mem(qm_port);
> -		qm_port->pp_mmio_base = 0;
> -	}
> -
> -	rte_spinlock_unlock(&handle->resource_lock);
> -
> -	DLB_LOG_ERR("dlb: create ldb port failed!\n");
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
> -		       struct dlb_eventdev_port *ev_port,
> -		       uint32_t dequeue_depth,
> -		       uint32_t cq_depth,
> -		       uint32_t enqueue_depth,
> -		       uint16_t rsvd_tokens,
> -		       bool use_rsvd_token_scheme)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_create_dir_port_args cfg = {0};
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -	struct dlb_port *qm_port = NULL;
> -	char mz_name[RTE_MEMZONE_NAMESIZE];
> -	uint32_t qm_port_id;
> -
> -	if (dlb == NULL || handle == NULL)
> -		return -EINVAL;
> -
> -	if (cq_depth < DLB_MIN_DIR_CQ_DEPTH) {
> -		DLB_LOG_ERR("dlb: invalid cq_depth, must be at least %d\n",
> -			    DLB_MIN_DIR_CQ_DEPTH);
> -		return -EINVAL;
> -	}
> -
> -	if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
> -		DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
> -			    DLB_MIN_ENQUEUE_DEPTH);
> -		return -EINVAL;
> -	}
> -
> -	rte_spinlock_lock(&handle->resource_lock);
> -
> -	/* Directed queues are configured at link time. */
> -	cfg.queue_id = -1;
> -
> -	cfg.response = (uintptr_t)&response;
> -
> -	/* We round up to the next power of 2 if necessary */
> -	cfg.cq_depth = rte_align32pow2(cq_depth);
> -	cfg.cq_depth_threshold = rsvd_tokens;
> -
> -	/* User controls the LDB high watermark via enqueue depth. The DIR high
> -	 * watermark is equal, unless the directed credit pool is too small.
> -	 */
> -	cfg.ldb_credit_high_watermark = enqueue_depth;
> -
> -	/* Don't use enqueue_depth if it would require more directed credits
> -	 * than are available.
> -	 */
> -	cfg.dir_credit_high_watermark =
> -		RTE_MIN(enqueue_depth,
> -			handle->cfg.num_dir_credits / dlb->num_ports);
> -
> -	cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
> -	cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
> -
> -	cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
> -	cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
> -
> -	/* Per QM values */
> -
> -	cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
> -	cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
> -
> -	ret = dlb_iface_dir_port_create(handle, &cfg, dlb->poll_mode);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: dlb_dir_port_create error, ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		goto error_exit;
> -	}
> -
> -	qm_port_id = response.id;
> -
> -	DLB_LOG_DBG("dlb: ev_port %d uses qm DIR port %d <<<<<\n",
> -		    ev_port->id, qm_port_id);
> -
> -	qm_port = &ev_port->qm_port;
> -	qm_port->ev_port = ev_port; /* back ptr */
> -	qm_port->dlb = dlb;  /* back ptr */
> -
> -	/*
> -	 * Init local qe struct(s).
> -	 * Note: MOVDIR64 requires the enqueue QE to be aligned
> -	 */
> -
> -	snprintf(mz_name, sizeof(mz_name), "dir_port%d",
> -		 ev_port->id);
> -
> -	ret = dlb_init_qe_mem(qm_port, mz_name);
> -
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
> -		goto error_exit;
> -	}
> -
> -	qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
> -	qm_port->id = qm_port_id;
> -
> -	/* The credit window is one high water mark of QEs */
> -	qm_port->ldb_pushcount_at_credit_expiry = 0;
> -	qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
> -	/* The credit window is one high water mark of QEs */
> -	qm_port->dir_pushcount_at_credit_expiry = 0;
> -	qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
> -	qm_port->cq_depth = cfg.cq_depth;
> -	qm_port->cq_idx = 0;
> -	qm_port->cq_idx_unmasked = 0;
> -	if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
> -		qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
> -	else
> -		qm_port->cq_depth_mask = cfg.cq_depth - 1;
> -
> -	qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
> -	/* starting value of gen bit - it toggles at wrap time */
> -	qm_port->gen_bit = 1;
> -
> -	qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
> -	qm_port->cq_rsvd_token_deficit = rsvd_tokens;
> -	qm_port->int_armed = false;
> -
> -	/* Save off for later use in info and lookup APIs. */
> -	qm_port->qid_mappings = &dlb->qm_dir_to_ev_queue_id[0];
> -
> -	qm_port->dequeue_depth = dequeue_depth;
> -
> -	/* Directed ports are auto-pop, by default. */
> -	qm_port->token_pop_mode = AUTO_POP;
> -	qm_port->owed_tokens = 0;
> -	qm_port->issued_releases = 0;
> -
> -	/* update state */
> -	qm_port->state = PORT_STARTED; /* enabled at create time */
> -	qm_port->config_state = DLB_CONFIGURED;
> -
> -	qm_port->dir_credits = cfg.dir_credit_high_watermark;
> -	qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
> -
> -	DLB_LOG_DBG("dlb: created dir port %d, depth = %d cr=%d,%d\n",
> -		    qm_port_id,
> -		    cq_depth,
> -		    cfg.dir_credit_high_watermark,
> -		    cfg.ldb_credit_high_watermark);
> -
> -	rte_spinlock_unlock(&handle->resource_lock);
> -
> -	return 0;
> -
> -error_exit:
> -	if (qm_port) {
> -		qm_port->pp_mmio_base = 0;
> -		dlb_free_qe_mem(qm_port);
> -	}
> -
> -	rte_spinlock_unlock(&handle->resource_lock);
> -
> -	DLB_LOG_ERR("dlb: create dir port failed!\n");
> -
> -	return ret;
> -}
> -
> -static int32_t
> -dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
> -			struct dlb_queue *queue,
> -			const struct rte_event_queue_conf *evq_conf)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_create_ldb_queue_args cfg;
> -	struct dlb_cmd_response response;
> -	int32_t ret;
> -	uint32_t qm_qid;
> -	int sched_type = -1;
> -
> -	if (evq_conf == NULL)
> -		return -EINVAL;
> -
> -	if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
> -		if (evq_conf->nb_atomic_order_sequences != 0)
> -			sched_type = RTE_SCHED_TYPE_ORDERED;
> -		else
> -			sched_type = RTE_SCHED_TYPE_PARALLEL;
> -	} else
> -		sched_type = evq_conf->schedule_type;
> -
> -	cfg.response = (uintptr_t)&response;
> -	cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
> -	cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
> -	cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
> -
> -	if (sched_type != RTE_SCHED_TYPE_ORDERED) {
> -		cfg.num_sequence_numbers = 0;
> -		cfg.num_qid_inflights = DLB_DEF_UNORDERED_QID_INFLIGHTS;
> -	}
> -
> -	ret = dlb_iface_ldb_queue_create(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return -EINVAL;
> -	}
> -
> -	qm_qid = response.id;
> -
> -	/* Save off queue config for debug, resource lookups, and reconfig */
> -	queue->num_qid_inflights = cfg.num_qid_inflights;
> -	queue->num_atm_inflights = cfg.num_atomic_inflights;
> -
> -	queue->sched_type = sched_type;
> -	queue->config_state = DLB_CONFIGURED;
> -
> -	DLB_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
> -		    qm_qid,
> -		    cfg.num_atomic_inflights,
> -		    cfg.num_sequence_numbers,
> -		    cfg.num_qid_inflights);
> -
> -	return qm_qid;
> -}
> -
> -static int32_t
> -dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_get_sn_allocation_args cfg;
> -	struct dlb_cmd_response response;
> -	int ret;
> -
> -	cfg.group = group;
> -	cfg.response = (uintptr_t)&response;
> -
> -	ret = dlb_iface_get_sn_allocation(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return ret;
> -	}
> -
> -	return response.id;
> -}
> -
> -static int
> -dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_set_sn_allocation_args cfg;
> -	struct dlb_cmd_response response;
> -	int ret;
> -
> -	cfg.num = num;
> -	cfg.group = group;
> -	cfg.response = (uintptr_t)&response;
> -
> -	ret = dlb_iface_set_sn_allocation(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return ret;
> -	}
> -
> -	return ret;
> -}
> -
> -static int32_t
> -dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_get_sn_occupancy_args cfg;
> -	struct dlb_cmd_response response;
> -	int ret;
> -
> -	cfg.group = group;
> -	cfg.response = (uintptr_t)&response;
> -
> -	ret = dlb_iface_get_sn_occupancy(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return ret;
> -	}
> -
> -	return response.id;
> -}
> -
> -/* Query the current sequence number allocations and, if they conflict with the
> - * requested LDB queue configuration, attempt to re-allocate sequence numbers.
> - * This is best-effort; if it fails, the PMD will attempt to configure the
> - * load-balanced queue and return an error.
> - */
> -static void
> -dlb_program_sn_allocation(struct dlb_eventdev *dlb,
> -			  const struct rte_event_queue_conf *queue_conf)
> -{
> -	int grp_occupancy[DLB_NUM_SN_GROUPS];
> -	int grp_alloc[DLB_NUM_SN_GROUPS];
> -	int i, sequence_numbers;
> -
> -	sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
> -
> -	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
> -		int total_slots;
> -
> -		grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
> -		if (grp_alloc[i] < 0)
> -			return;
> -
> -		total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];
> -
> -		grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
> -		if (grp_occupancy[i] < 0)
> -			return;
> -
> -		/* DLB has at least one available slot for the requested
> -		 * sequence numbers, so no further configuration required.
> -		 */
> -		if (grp_alloc[i] == sequence_numbers &&
> -		    grp_occupancy[i] < total_slots)
> -			return;
> -	}
> -
> -	/* None of the sequence number groups are configured for the requested
> -	 * sequence numbers, so we have to reconfigure one of them. This is
> -	 * only possible if a group is not in use.
> -	 */
> -	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
> -		if (grp_occupancy[i] == 0)
> -			break;
> -	}
> -
> -	if (i == DLB_NUM_SN_GROUPS) {
> -		DLB_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
> -		       __func__, sequence_numbers);
> -		return;
> -	}
> -
> -	/* Attempt to configure slot i with the requested number of sequence
> -	 * numbers. Ignore the return value -- if this fails, the error will be
> -	 * caught during subsequent queue configuration.
> -	 */
> -	dlb_set_sn_allocation(dlb, i, sequence_numbers);
> -}
> -
> -static int
> -dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
> -			     struct dlb_eventdev_queue *ev_queue,
> -			     const struct rte_event_queue_conf *queue_conf)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	int32_t qm_qid;
> -
> -	if (queue_conf->nb_atomic_order_sequences)
> -		dlb_program_sn_allocation(dlb, queue_conf);
> -
> -	qm_qid = dlb_hw_create_ldb_queue(dlb,
> -					 &ev_queue->qm_queue,
> -					 queue_conf);
> -	if (qm_qid < 0) {
> -		DLB_LOG_ERR("Failed to create the load-balanced queue\n");
> -
> -		return qm_qid;
> -	}
> -
> -	dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
> -
> -	ev_queue->qm_queue.id = qm_qid;
> -
> -	return 0;
> -}
> -
> -static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
> -{
> -	int i, num = 0;
> -
> -	for (i = 0; i < dlb->num_queues; i++) {
> -		if (dlb->ev_queues[i].setup_done &&
> -		    dlb->ev_queues[i].qm_queue.is_directed)
> -			num++;
> -	}
> -
> -	return num;
> -}
> -
> -static void
> -dlb_queue_link_teardown(struct dlb_eventdev *dlb,
> -			struct dlb_eventdev_queue *ev_queue)
> -{
> -	struct dlb_eventdev_port *ev_port;
> -	int i, j;
> -
> -	for (i = 0; i < dlb->num_ports; i++) {
> -		ev_port = &dlb->ev_ports[i];
> -
> -		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
> -			if (!ev_port->link[j].valid ||
> -			    ev_port->link[j].queue_id != ev_queue->id)
> -				continue;
> -
> -			ev_port->link[j].valid = false;
> -			ev_port->num_links--;
> -		}
> -	}
> -
> -	ev_queue->num_links = 0;
> -}
> -
> -static int
> -dlb_eventdev_queue_setup(struct rte_eventdev *dev,
> -			 uint8_t ev_qid,
> -			 const struct rte_event_queue_conf *queue_conf)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	struct dlb_eventdev_queue *ev_queue;
> -	int ret;
> -
> -	if (queue_conf == NULL)
> -		return -EINVAL;
> -
> -	if (ev_qid >= dlb->num_queues)
> -		return -EINVAL;
> -
> -	ev_queue = &dlb->ev_queues[ev_qid];
> -
> -	ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
> -		RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
> -	ev_queue->id = ev_qid;
> -	ev_queue->conf = *queue_conf;
> -
> -	if (!ev_queue->qm_queue.is_directed) {
> -		ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
> -	} else {
> -		/* The directed queue isn't setup until link time, at which
> -		 * point we know its directed port ID. Directed queue setup
> -		 * will only fail if this queue is already setup or there are
> -		 * no directed queues left to configure.
> -		 */
> -		ret = 0;
> -
> -		ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;
> -
> -		if (ev_queue->setup_done ||
> -		    dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
> -			ret = -EINVAL;
> -	}
> -
> -	/* Tear down pre-existing port->queue links */
> -	if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
> -		dlb_queue_link_teardown(dlb, ev_queue);
> -
> -	if (!ret)
> -		ev_queue->setup_done = true;
> -
> -	return ret;
> -}
> -
> -static void
> -dlb_port_link_teardown(struct dlb_eventdev *dlb,
> -		       struct dlb_eventdev_port *ev_port)
> -{
> -	struct dlb_eventdev_queue *ev_queue;
> -	int i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
> -		if (!ev_port->link[i].valid)
> -			continue;
> -
> -		ev_queue = &dlb->ev_queues[ev_port->link[i].queue_id];
> -
> -		ev_port->link[i].valid = false;
> -		ev_port->num_links--;
> -		ev_queue->num_links--;
> -	}
> -}
> -
> -static int
> -dlb_eventdev_port_setup(struct rte_eventdev *dev,
> -			uint8_t ev_port_id,
> -			const struct rte_event_port_conf *port_conf)
> -{
> -	struct dlb_eventdev *dlb;
> -	struct dlb_eventdev_port *ev_port;
> -	bool use_rsvd_token_scheme;
> -	uint32_t adj_cq_depth;
> -	uint16_t rsvd_tokens;
> -	int ret;
> -
> -	if (dev == NULL || port_conf == NULL) {
> -		DLB_LOG_ERR("Null parameter\n");
> -		return -EINVAL;
> -	}
> -
> -	dlb = dlb_pmd_priv(dev);
> -
> -	if (ev_port_id >= DLB_MAX_NUM_PORTS)
> -		return -EINVAL;
> -
> -	if (port_conf->dequeue_depth >
> -		evdev_dlb_default_info.max_event_port_dequeue_depth ||
> -	    port_conf->enqueue_depth >
> -		evdev_dlb_default_info.max_event_port_enqueue_depth)
> -		return -EINVAL;
> -
> -	ev_port = &dlb->ev_ports[ev_port_id];
> -	/* configured? */
> -	if (ev_port->setup_done) {
> -		DLB_LOG_ERR("evport %d is already configured\n", ev_port_id);
> -		return -EINVAL;
> -	}
> -
> -	/* The reserved token interrupt arming scheme requires that one or more
> -	 * CQ tokens be reserved by the PMD. This limits the amount of CQ space
> -	 * usable by the DLB, so in order to give an *effective* CQ depth equal
> -	 * to the user-requested value, we double CQ depth and reserve half of
> -	 * its tokens. If the user requests the max CQ depth (256) then we
> -	 * cannot double it, so we reserve one token and give an effective
> -	 * depth of 255 entries.
> -	 */
> -	use_rsvd_token_scheme = true;
> -	rsvd_tokens = 1;
> -	adj_cq_depth = port_conf->dequeue_depth;
> -
> -	if (use_rsvd_token_scheme && adj_cq_depth < 256) {
> -		rsvd_tokens = adj_cq_depth;
> -		adj_cq_depth *= 2;
> -	}
> -
> -	ev_port->qm_port.is_directed = port_conf->event_port_cfg &
> -		RTE_EVENT_PORT_CFG_SINGLE_LINK;
> -
> -	if (!ev_port->qm_port.is_directed) {
> -		ret = dlb_hw_create_ldb_port(dlb,
> -					     ev_port,
> -					     port_conf->dequeue_depth,
> -					     adj_cq_depth,
> -					     port_conf->enqueue_depth,
> -					     rsvd_tokens,
> -					     use_rsvd_token_scheme);
> -		if (ret < 0) {
> -			DLB_LOG_ERR("Failed to create the lB port ve portId=%d\n",
> -				    ev_port_id);
> -			return ret;
> -		}
> -	} else {
> -		ret = dlb_hw_create_dir_port(dlb,
> -					     ev_port,
> -					     port_conf->dequeue_depth,
> -					     adj_cq_depth,
> -					     port_conf->enqueue_depth,
> -					     rsvd_tokens,
> -					     use_rsvd_token_scheme);
> -		if (ret < 0) {
> -			DLB_LOG_ERR("Failed to create the DIR port\n");
> -			return ret;
> -		}
> -	}
> -
> -	/* Save off port config for reconfig */
> -	dlb->ev_ports[ev_port_id].conf = *port_conf;
> -
> -	dlb->ev_ports[ev_port_id].id = ev_port_id;
> -	dlb->ev_ports[ev_port_id].enq_configured = true;
> -	dlb->ev_ports[ev_port_id].setup_done = true;
> -	dlb->ev_ports[ev_port_id].inflight_max =
> -		port_conf->new_event_threshold;
> -	dlb->ev_ports[ev_port_id].implicit_release =
> -		!(port_conf->event_port_cfg &
> -		  RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
> -	dlb->ev_ports[ev_port_id].outstanding_releases = 0;
> -	dlb->ev_ports[ev_port_id].inflight_credits = 0;
> -	dlb->ev_ports[ev_port_id].credit_update_quanta =
> -		RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA;
> -	dlb->ev_ports[ev_port_id].dlb = dlb; /* reverse link */
> -
> -	/* Tear down pre-existing port->queue links */
> -	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
> -		dlb_port_link_teardown(dlb, &dlb->ev_ports[ev_port_id]);
> -
> -	dev->data->ports[ev_port_id] = &dlb->ev_ports[ev_port_id];
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	int ret, i;
> -
> -	/* If an event queue or port was previously configured, but hasn't been
> -	 * reconfigured, reapply its original configuration.
> -	 */
> -	for (i = 0; i < dlb->num_queues; i++) {
> -		struct dlb_eventdev_queue *ev_queue;
> -
> -		ev_queue = &dlb->ev_queues[i];
> -
> -		if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
> -			continue;
> -
> -		ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
> -		if (ret < 0) {
> -			DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
> -			return ret;
> -		}
> -	}
> -
> -	for (i = 0; i < dlb->num_ports; i++) {
> -		struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
> -
> -		if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
> -			continue;
> -
> -		ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
> -		if (ret < 0) {
> -			DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
> -				    i);
> -			return ret;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -static int
> -set_dev_id(const char *key __rte_unused,
> -	   const char *value,
> -	   void *opaque)
> -{
> -	int *dev_id = opaque;
> -	int ret;
> -
> -	if (value == NULL || opaque == NULL) {
> -		DLB_LOG_ERR("NULL pointer\n");
> -		return -EINVAL;
> -	}
> -
> -	ret = dlb_string_to_int(dev_id, value);
> -	if (ret < 0)
> -		return ret;
> -
> -	return 0;
> -}
> -
> -static int
> -set_defer_sched(const char *key __rte_unused,
> -		const char *value,
> -		void *opaque)
> -{
> -	int *defer_sched = opaque;
> -
> -	if (value == NULL || opaque == NULL) {
> -		DLB_LOG_ERR("NULL pointer\n");
> -		return -EINVAL;
> -	}
> -
> -	if (strncmp(value, "on", 2) != 0) {
> -		DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
> -			    value);
> -		return -EINVAL;
> -	}
> -
> -	*defer_sched = 1;
> -
> -	return 0;
> -}
> -
> -static int
> -set_num_atm_inflights(const char *key __rte_unused,
> -		      const char *value,
> -		      void *opaque)
> -{
> -	int *num_atm_inflights = opaque;
> -	int ret;
> -
> -	if (value == NULL || opaque == NULL) {
> -		DLB_LOG_ERR("NULL pointer\n");
> -		return -EINVAL;
> -	}
> -
> -	ret = dlb_string_to_int(num_atm_inflights, value);
> -	if (ret < 0)
> -		return ret;
> -
> -	if (*num_atm_inflights < 0 ||
> -	    *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
> -		DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
> -			    DLB_MAX_NUM_ATM_INFLIGHTS);
> -		return -EINVAL;
> -	}
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
> -		       uint8_t queue_id,
> -		       bool link_exists,
> -		       int index)
> -{
> -	struct dlb_eventdev *dlb = ev_port->dlb;
> -	struct dlb_eventdev_queue *ev_queue;
> -	bool port_is_dir, queue_is_dir;
> -
> -	if (queue_id > dlb->num_queues) {
> -		DLB_LOG_ERR("queue_id %d > num queues %d\n",
> -			    queue_id, dlb->num_queues);
> -		rte_errno = -EINVAL;
> -		return -1;
> -	}
> -
> -	ev_queue = &dlb->ev_queues[queue_id];
> -
> -	if (!ev_queue->setup_done &&
> -	    ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED) {
> -		DLB_LOG_ERR("setup not done and not previously configured\n");
> -		rte_errno = -EINVAL;
> -		return -1;
> -	}
> -
> -	port_is_dir = ev_port->qm_port.is_directed;
> -	queue_is_dir = ev_queue->qm_queue.is_directed;
> -
> -	if (port_is_dir != queue_is_dir) {
> -		DLB_LOG_ERR("%s queue %u can't link to %s port %u\n",
> -			    queue_is_dir ? "DIR" : "LDB", ev_queue->id,
> -			    port_is_dir ? "DIR" : "LDB", ev_port->id);
> -
> -		rte_errno = -EINVAL;
> -		return -1;
> -	}
> -
> -	/* Check if there is space for the requested link */
> -	if (!link_exists && index == -1) {
> -		DLB_LOG_ERR("no space for new link\n");
> -		rte_errno = -ENOSPC;
> -		return -1;
> -	}
> -
> -	/* Check if the directed port is already linked */
> -	if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
> -	    !link_exists) {
> -		DLB_LOG_ERR("Can't link DIR port %d to >1 queues\n",
> -			    ev_port->id);
> -		rte_errno = -EINVAL;
> -		return -1;
> -	}
> -
> -	/* Check if the directed queue is already linked */
> -	if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
> -	    !link_exists) {
> -		DLB_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
> -			    ev_queue->id);
> -		rte_errno = -EINVAL;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static int32_t
> -dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_create_dir_queue_args cfg;
> -	struct dlb_cmd_response response = {0};
> -	int32_t ret;
> -
> -	cfg.response = (uintptr_t)&response;
> -
> -	/* The directed port is always configured before its queue */
> -	cfg.port_id = qm_port_id;
> -
> -	ret = dlb_iface_dir_queue_create(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return -EINVAL;
> -	}
> -
> -	return response.id;
> -}
> -
> -static int
> -dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
> -			     struct dlb_eventdev_queue *ev_queue,
> -			     struct dlb_eventdev_port *ev_port)
> -{
> -	int32_t qm_qid;
> -
> -	qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
> -
> -	if (qm_qid < 0) {
> -		DLB_LOG_ERR("Failed to create the DIR queue\n");
> -		return qm_qid;
> -	}
> -
> -	dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
> -
> -	ev_queue->qm_queue.id = qm_qid;
> -
> -	return 0;
> -}
> -
> -static int16_t
> -dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
> -			   uint32_t qm_port_id,
> -			   uint16_t qm_qid,
> -			   uint8_t priority)
> -{
> -	struct dlb_map_qid_args cfg;
> -	struct dlb_cmd_response response;
> -	int32_t ret;
> -
> -	if (handle == NULL)
> -		return -EINVAL;
> -
> -	/* Build message */
> -	cfg.response = (uintptr_t)&response;
> -	cfg.port_id = qm_port_id;
> -	cfg.qid = qm_qid;
> -	cfg.priority = EV_TO_DLB_PRIO(priority);
> -
> -	ret = dlb_iface_map_qid(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: map qid error, ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		DLB_LOG_ERR("dlb: device_id=%d grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
> -			    handle->device_id,
> -			    handle->domain_id, cfg.port_id,
> -			    cfg.qid,
> -			    cfg.priority);
> -	} else {
> -		DLB_LOG_DBG("dlb: mapped queue %d to qm_port %d\n",
> -			    qm_qid, qm_port_id);
> -	}
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
> -			 struct dlb_eventdev_port *ev_port,
> -			 struct dlb_eventdev_queue *ev_queue,
> -			 uint8_t priority)
> -{
> -	int first_avail = -1;
> -	int ret, i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
> -		if (ev_port->link[i].valid) {
> -			if (ev_port->link[i].queue_id == ev_queue->id &&
> -			    ev_port->link[i].priority == priority) {
> -				if (ev_port->link[i].mapped)
> -					return 0; /* already mapped */
> -				first_avail = i;
> -			}
> -		} else {
> -			if (first_avail == -1)
> -				first_avail = i;
> -		}
> -	}
> -	if (first_avail == -1) {
> -		DLB_LOG_ERR("dlb: qm_port %d has no available QID slots.\n",
> -			    ev_port->qm_port.id);
> -		return -EINVAL;
> -	}
> -
> -	ret = dlb_hw_map_ldb_qid_to_port(&dlb->qm_instance,
> -					 ev_port->qm_port.id,
> -					 ev_queue->qm_queue.id,
> -					 priority);
> -
> -	if (!ret)
> -		ev_port->link[first_avail].mapped = true;
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_do_port_link(struct rte_eventdev *dev,
> -		 struct dlb_eventdev_queue *ev_queue,
> -		 struct dlb_eventdev_port *ev_port,
> -		 uint8_t prio)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	int err;
> -
> -	/* Don't link until start time. */
> -	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
> -		return 0;
> -
> -	if (ev_queue->qm_queue.is_directed)
> -		err = dlb_eventdev_dir_queue_setup(dlb, ev_queue, ev_port);
> -	else
> -		err = dlb_event_queue_join_ldb(dlb, ev_port, ev_queue, prio);
> -
> -	if (err) {
> -		DLB_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
> -			    ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
> -			    ev_queue->id, ev_port->id);
> -
> -		rte_errno = err;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	int i;
> -
> -	/* Perform requested port->queue links */
> -	for (i = 0; i < dlb->num_ports; i++) {
> -		struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
> -		int j;
> -
> -		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
> -			struct dlb_eventdev_queue *ev_queue;
> -			uint8_t prio, queue_id;
> -
> -			if (!ev_port->link[j].valid)
> -				continue;
> -
> -			prio = ev_port->link[j].priority;
> -			queue_id = ev_port->link[j].queue_id;
> -
> -			if (dlb_validate_port_link(ev_port, queue_id, true, j))
> -				return -EINVAL;
> -
> -			ev_queue = &dlb->ev_queues[queue_id];
> -
> -			if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
> -				return -EINVAL;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
> -		       const uint8_t queues[], const uint8_t priorities[],
> -		       uint16_t nb_links)
> -
> -{
> -	struct dlb_eventdev_port *ev_port = event_port;
> -	struct dlb_eventdev *dlb;
> -	int i, j;
> -
> -	RTE_SET_USED(dev);
> -
> -	if (ev_port == NULL) {
> -		DLB_LOG_ERR("dlb: evport not setup\n");
> -		rte_errno = -EINVAL;
> -		return 0;
> -	}
> -
> -	if (!ev_port->setup_done &&
> -	    ev_port->qm_port.config_state != DLB_PREV_CONFIGURED) {
> -		DLB_LOG_ERR("dlb: evport not setup\n");
> -		rte_errno = -EINVAL;
> -		return 0;
> -	}
> -
> -	/* Note: rte_event_port_link() ensures the PMD won't receive a NULL
> -	 * queues pointer.
> -	 */
> -	if (nb_links == 0) {
> -		DLB_LOG_DBG("dlb: nb_links is 0\n");
> -		return 0; /* Ignore and return success */
> -	}
> -
> -	dlb = ev_port->dlb;
> -
> -	DLB_LOG_DBG("Linking %u queues to %s port %d\n",
> -		    nb_links,
> -		    ev_port->qm_port.is_directed ? "DIR" : "LDB",
> -		    ev_port->id);
> -
> -	for (i = 0; i < nb_links; i++) {
> -		struct dlb_eventdev_queue *ev_queue;
> -		uint8_t queue_id, prio;
> -		bool found = false;
> -		int index = -1;
> -
> -		queue_id = queues[i];
> -		prio = priorities[i];
> -
> -		/* Check if the link already exists. */
> -		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
> -			if (ev_port->link[j].valid) {
> -				if (ev_port->link[j].queue_id == queue_id) {
> -					found = true;
> -					index = j;
> -					break;
> -				}
> -			} else {
> -				if (index == -1)
> -					index = j;
> -			}
> -
> -		/* could not link */
> -		if (index == -1)
> -			break;
> -
> -		/* Check if already linked at the requested priority */
> -		if (found && ev_port->link[j].priority == prio)
> -			continue;
> -
> -		if (dlb_validate_port_link(ev_port, queue_id, found, index))
> -			break; /* return index of offending queue */
> -
> -		ev_queue = &dlb->ev_queues[queue_id];
> -
> -		if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
> -			break; /* return index of offending queue */
> -
> -		ev_queue->num_links++;
> -
> -		ev_port->link[index].queue_id = queue_id;
> -		ev_port->link[index].priority = prio;
> -		ev_port->link[index].valid = true;
> -		/* Entry already exists?  If so, then must be prio change */
> -		if (!found)
> -			ev_port->num_links++;
> -	}
> -	return i;
> -}
> -
> -static int
> -dlb_eventdev_start(struct rte_eventdev *dev)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_start_domain_args cfg;
> -	struct dlb_cmd_response response;
> -	int ret, i;
> -
> -	rte_spinlock_lock(&dlb->qm_instance.resource_lock);
> -	if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
> -		DLB_LOG_ERR("bad state %d for dev_start\n",
> -			    (int)dlb->run_state);
> -		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
> -		return -EINVAL;
> -	}
> -	dlb->run_state	= DLB_RUN_STATE_STARTING;
> -	rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
> -
> -	/* If the device was configured more than once, some event ports and/or
> -	 * queues may need to be reconfigured.
> -	 */
> -	ret = dlb_eventdev_reapply_configuration(dev);
> -	if (ret)
> -		return ret;
> -
> -	/* The DLB PMD delays port links until the device is started. */
> -	ret = dlb_eventdev_apply_port_links(dev);
> -	if (ret)
> -		return ret;
> -
> -	cfg.response = (uintptr_t)&response;
> -
> -	for (i = 0; i < dlb->num_ports; i++) {
> -		if (!dlb->ev_ports[i].setup_done) {
> -			DLB_LOG_ERR("dlb: port %d not setup", i);
> -			return -ESTALE;
> -		}
> -	}
> -
> -	for (i = 0; i < dlb->num_queues; i++) {
> -		if (dlb->ev_queues[i].num_links == 0) {
> -			DLB_LOG_ERR("dlb: queue %d is not linked", i);
> -			return -ENOLINK;
> -		}
> -	}
> -
> -	ret = dlb_iface_sched_domain_start(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return ret;
> -	}
> -
> -	dlb->run_state = DLB_RUN_STATE_STARTED;
> -	DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
> -
> -	return 0;
> -}
> -
> -static inline int
> -dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
> -			     struct dlb_eventdev_port *ev_port)
> -{
> -	uint32_t sw_inflights = __atomic_load_n(&dlb->inflights,
> -						__ATOMIC_SEQ_CST);
> -	const int num = 1;
> -
> -	if (unlikely(ev_port->inflight_max < sw_inflights)) {
> -		DLB_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
> -		rte_errno = -ENOSPC;
> -		return 1;
> -	}
> -
> -	if (ev_port->inflight_credits < num) {
> -		/* check if event enqueue brings ev_port over max threshold */
> -		uint32_t credit_update_quanta = ev_port->credit_update_quanta;
> -
> -		if (sw_inflights + credit_update_quanta >
> -		    dlb->new_event_limit) {
> -			DLB_INC_STAT(
> -				ev_port->stats.traffic.tx_nospc_new_event_limit,
> -				1);
> -			rte_errno = -ENOSPC;
> -			return 1;
> -		}
> -
> -		__atomic_fetch_add(&dlb->inflights, credit_update_quanta,
> -				   __ATOMIC_SEQ_CST);
> -		ev_port->inflight_credits += (credit_update_quanta);
> -
> -		if (ev_port->inflight_credits < num) {
> -			DLB_INC_STAT(
> -			    ev_port->stats.traffic.tx_nospc_inflight_credits,
> -			    1);
> -			rte_errno = -ENOSPC;
> -			return 1;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -static inline void
> -dlb_replenish_sw_credits(struct dlb_eventdev *dlb,
> -			 struct dlb_eventdev_port *ev_port)
> -{
> -	uint16_t quanta = ev_port->credit_update_quanta;
> -
> -	if (ev_port->inflight_credits >= quanta * 2) {
> -		/* Replenish credits, saving one quanta for enqueues */
> -		uint16_t val = ev_port->inflight_credits - quanta;
> -
> -		__atomic_fetch_sub(&dlb->inflights, val, __ATOMIC_SEQ_CST);
> -		ev_port->inflight_credits -= val;
> -	}
> -}
> -
> -static __rte_always_inline uint16_t
> -dlb_read_pc(struct process_local_port_data *port_data, bool ldb)
> -{
> -	volatile uint16_t *popcount;
> -
> -	if (ldb)
> -		popcount = port_data->ldb_popcount;
> -	else
> -		popcount = port_data->dir_popcount;
> -
> -	return *popcount;
> -}
> -
> -static inline int
> -dlb_check_enqueue_hw_ldb_credits(struct dlb_port *qm_port,
> -				 struct process_local_port_data *port_data)
> -{
> -	if (unlikely(qm_port->cached_ldb_credits == 0)) {
> -		uint16_t pc;
> -
> -		pc = dlb_read_pc(port_data, true);
> -
> -		qm_port->cached_ldb_credits = pc -
> -			qm_port->ldb_pushcount_at_credit_expiry;
> -		if (unlikely(qm_port->cached_ldb_credits == 0)) {
> -			DLB_INC_STAT(
> -			qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
> -			1);
> -
> -			DLB_LOG_DBG("ldb credits exhausted\n");
> -			return 1;
> -		}
> -		qm_port->ldb_pushcount_at_credit_expiry +=
> -			qm_port->cached_ldb_credits;
> -	}
> -
> -	return 0;
> -}
> -
> -static inline int
> -dlb_check_enqueue_hw_dir_credits(struct dlb_port *qm_port,
> -				 struct process_local_port_data *port_data)
> -{
> -	if (unlikely(qm_port->cached_dir_credits == 0)) {
> -		uint16_t pc;
> -
> -		pc = dlb_read_pc(port_data, false);
> -
> -		qm_port->cached_dir_credits = pc -
> -			qm_port->dir_pushcount_at_credit_expiry;
> -
> -		if (unlikely(qm_port->cached_dir_credits == 0)) {
> -			DLB_INC_STAT(
> -			qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
> -			1);
> -
> -			DLB_LOG_DBG("dir credits exhausted\n");
> -			return 1;
> -		}
> -		qm_port->dir_pushcount_at_credit_expiry +=
> -			qm_port->cached_dir_credits;
> -	}
> -
> -	return 0;
> -}
> -
> -static inline int
> -dlb_event_enqueue_prep(struct dlb_eventdev_port *ev_port,
> -		       struct dlb_port *qm_port,
> -		       const struct rte_event ev[],
> -		       struct process_local_port_data *port_data,
> -		       uint8_t *sched_type,
> -		       uint8_t *queue_id)
> -{
> -	struct dlb_eventdev *dlb = ev_port->dlb;
> -	struct dlb_eventdev_queue *ev_queue;
> -	uint16_t *cached_credits = NULL;
> -	struct dlb_queue *qm_queue;
> -
> -	ev_queue = &dlb->ev_queues[ev->queue_id];
> -	qm_queue = &ev_queue->qm_queue;
> -	*queue_id = qm_queue->id;
> -
> -	/* Ignore sched_type and hardware credits on release events */
> -	if (ev->op == RTE_EVENT_OP_RELEASE)
> -		goto op_check;
> -
> -	if (!qm_queue->is_directed) {
> -		/* Load balanced destination queue */
> -
> -		if (dlb_check_enqueue_hw_ldb_credits(qm_port, port_data)) {
> -			rte_errno = -ENOSPC;
> -			return 1;
> -		}
> -		cached_credits = &qm_port->cached_ldb_credits;
> -
> -		switch (ev->sched_type) {
> -		case RTE_SCHED_TYPE_ORDERED:
> -			DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ORDERED\n");
> -			if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
> -				DLB_LOG_ERR("dlb: tried to send ordered event to unordered queue %d\n",
> -					    *queue_id);
> -				rte_errno = -EINVAL;
> -				return 1;
> -			}
> -			*sched_type = DLB_SCHED_ORDERED;
> -			break;
> -		case RTE_SCHED_TYPE_ATOMIC:
> -			DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
> -			*sched_type = DLB_SCHED_ATOMIC;
> -			break;
> -		case RTE_SCHED_TYPE_PARALLEL:
> -			DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
> -			if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
> -				*sched_type = DLB_SCHED_ORDERED;
> -			else
> -				*sched_type = DLB_SCHED_UNORDERED;
> -			break;
> -		default:
> -			DLB_LOG_ERR("Unsupported LDB sched type in put_qe\n");
> -			DLB_INC_STAT(ev_port->stats.tx_invalid, 1);
> -			rte_errno = -EINVAL;
> -			return 1;
> -		}
> -	} else {
> -		/* Directed destination queue */
> -
> -		if (dlb_check_enqueue_hw_dir_credits(qm_port, port_data)) {
> -			rte_errno = -ENOSPC;
> -			return 1;
> -		}
> -		cached_credits = &qm_port->cached_dir_credits;
> -
> -		DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
> -
> -		*sched_type = DLB_SCHED_DIRECTED;
> -	}
> -
> -op_check:
> -	switch (ev->op) {
> -	case RTE_EVENT_OP_NEW:
> -		/* Check that a sw credit is available */
> -		if (dlb_check_enqueue_sw_credits(dlb, ev_port)) {
> -			rte_errno = -ENOSPC;
> -			return 1;
> -		}
> -		ev_port->inflight_credits--;
> -		(*cached_credits)--;
> -		break;
> -	case RTE_EVENT_OP_FORWARD:
> -		/* Check for outstanding_releases underflow. If this occurs,
> -		 * the application is not using the EVENT_OPs correctly; for
> -		 * example, forwarding or releasing events that were not
> -		 * dequeued.
> -		 */
> -		RTE_ASSERT(ev_port->outstanding_releases > 0);
> -		ev_port->outstanding_releases--;
> -		qm_port->issued_releases++;
> -		(*cached_credits)--;
> -		break;
> -	case RTE_EVENT_OP_RELEASE:
> -		ev_port->inflight_credits++;
> -		/* Check for outstanding_releases underflow. If this occurs,
> -		 * the application is not using the EVENT_OPs correctly; for
> -		 * example, forwarding or releasing events that were not
> -		 * dequeued.
> -		 */
> -		RTE_ASSERT(ev_port->outstanding_releases > 0);
> -		ev_port->outstanding_releases--;
> -		qm_port->issued_releases++;
> -		/* Replenish s/w credits if enough are cached */
> -		dlb_replenish_sw_credits(dlb, ev_port);
> -		break;
> -	}
> -
> -	DLB_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
> -	DLB_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
> -
> -#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
> -	if (ev->op != RTE_EVENT_OP_RELEASE) {
> -		DLB_INC_STAT(ev_port->stats.enq_ok[ev->queue_id], 1);
> -		DLB_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
> -	}
> -#endif
> -
> -	return 0;
> -}
> -
> -static uint8_t cmd_byte_map[NUM_DLB_PORT_TYPES][DLB_NUM_HW_SCHED_TYPES] = {
> -	{
> -		/* Load-balanced cmd bytes */
> -		[RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
> -		[RTE_EVENT_OP_FORWARD] = DLB_FWD_CMD_BYTE,
> -		[RTE_EVENT_OP_RELEASE] = DLB_COMP_CMD_BYTE,
> -	},
> -	{
> -		/* Directed cmd bytes */
> -		[RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
> -		[RTE_EVENT_OP_FORWARD] = DLB_NEW_CMD_BYTE,
> -		[RTE_EVENT_OP_RELEASE] = DLB_NOOP_CMD_BYTE,
> -	},
> -};
> -
> -static inline void
> -dlb_event_build_hcws(struct dlb_port *qm_port,
> -		     const struct rte_event ev[],
> -		     int num,
> -		     uint8_t *sched_type,
> -		     uint8_t *queue_id)
> -{
> -	struct dlb_enqueue_qe *qe;
> -	uint16_t sched_word[4];
> -	__m128i sse_qe[2];
> -	int i;
> -
> -	qe = qm_port->qe4;
> -
> -	sse_qe[0] = _mm_setzero_si128();
> -	sse_qe[1] = _mm_setzero_si128();
> -
> -	switch (num) {
> -	case 4:
> -		/* Construct the metadata portion of two HCWs in one 128b SSE
> -		 * register. HCW metadata is constructed in the SSE registers
> -		 * like so:
> -		 * sse_qe[0][63:0]:   qe[0]'s metadata
> -		 * sse_qe[0][127:64]: qe[1]'s metadata
> -		 * sse_qe[1][63:0]:   qe[2]'s metadata
> -		 * sse_qe[1][127:64]: qe[3]'s metadata
> -		 */
> -
> -		/* Convert the event operation into a command byte and store it
> -		 * in the metadata:
> -		 * sse_qe[0][63:56]   = cmd_byte_map[is_directed][ev[0].op]
> -		 * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
> -		 * sse_qe[1][63:56]   = cmd_byte_map[is_directed][ev[2].op]
> -		 * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
> -		 */
> -#define DLB_QE_CMD_BYTE 7
> -		sse_qe[0] = _mm_insert_epi8(sse_qe[0],
> -				cmd_byte_map[qm_port->is_directed][ev[0].op],
> -				DLB_QE_CMD_BYTE);
> -		sse_qe[0] = _mm_insert_epi8(sse_qe[0],
> -				cmd_byte_map[qm_port->is_directed][ev[1].op],
> -				DLB_QE_CMD_BYTE + 8);
> -		sse_qe[1] = _mm_insert_epi8(sse_qe[1],
> -				cmd_byte_map[qm_port->is_directed][ev[2].op],
> -				DLB_QE_CMD_BYTE);
> -		sse_qe[1] = _mm_insert_epi8(sse_qe[1],
> -				cmd_byte_map[qm_port->is_directed][ev[3].op],
> -				DLB_QE_CMD_BYTE + 8);
> -
> -		/* Store priority, scheduling type, and queue ID in the sched
> -		 * word array because these values are re-used when the
> -		 * destination is a directed queue.
> -		 */
> -		sched_word[0] = EV_TO_DLB_PRIO(ev[0].priority) << 10 |
> -				sched_type[0] << 8 |
> -				queue_id[0];
> -		sched_word[1] = EV_TO_DLB_PRIO(ev[1].priority) << 10 |
> -				sched_type[1] << 8 |
> -				queue_id[1];
> -		sched_word[2] = EV_TO_DLB_PRIO(ev[2].priority) << 10 |
> -				sched_type[2] << 8 |
> -				queue_id[2];
> -		sched_word[3] = EV_TO_DLB_PRIO(ev[3].priority) << 10 |
> -				sched_type[3] << 8 |
> -				queue_id[3];
> -
> -		/* Store the event priority, scheduling type, and queue ID in
> -		 * the metadata:
> -		 * sse_qe[0][31:16] = sched_word[0]
> -		 * sse_qe[0][95:80] = sched_word[1]
> -		 * sse_qe[1][31:16] = sched_word[2]
> -		 * sse_qe[1][95:80] = sched_word[3]
> -		 */
> -#define DLB_QE_QID_SCHED_WORD 1
> -		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
> -					     sched_word[0],
> -					     DLB_QE_QID_SCHED_WORD);
> -		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
> -					     sched_word[1],
> -					     DLB_QE_QID_SCHED_WORD + 4);
> -		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
> -					     sched_word[2],
> -					     DLB_QE_QID_SCHED_WORD);
> -		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
> -					     sched_word[3],
> -					     DLB_QE_QID_SCHED_WORD + 4);
> -
> -		/* If the destination is a load-balanced queue, store the lock
> -		 * ID. If it is a directed queue, DLB places this field in
> -		 * bytes 10-11 of the received QE, so we format it accordingly:
> -		 * sse_qe[0][47:32]  = dir queue ? sched_word[0] : flow_id[0]
> -		 * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
> -		 * sse_qe[1][47:32]  = dir queue ? sched_word[2] : flow_id[2]
> -		 * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
> -		 */
> -#define DLB_QE_LOCK_ID_WORD 2
> -		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
> -				(sched_type[0] == DLB_SCHED_DIRECTED) ?
> -					sched_word[0] : ev[0].flow_id,
> -				DLB_QE_LOCK_ID_WORD);
> -		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
> -				(sched_type[1] == DLB_SCHED_DIRECTED) ?
> -					sched_word[1] : ev[1].flow_id,
> -				DLB_QE_LOCK_ID_WORD + 4);
> -		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
> -				(sched_type[2] == DLB_SCHED_DIRECTED) ?
> -					sched_word[2] : ev[2].flow_id,
> -				DLB_QE_LOCK_ID_WORD);
> -		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
> -				(sched_type[3] == DLB_SCHED_DIRECTED) ?
> -					sched_word[3] : ev[3].flow_id,
> -				DLB_QE_LOCK_ID_WORD + 4);
> -
> -		/* Store the event type and sub event type in the metadata:
> -		 * sse_qe[0][15:0]  = flow_id[0]
> -		 * sse_qe[0][79:64] = flow_id[1]
> -		 * sse_qe[1][15:0]  = flow_id[2]
> -		 * sse_qe[1][79:64] = flow_id[3]
> -		 */
> -#define DLB_QE_EV_TYPE_WORD 0
> -		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
> -					     ev[0].sub_event_type << 8 |
> -						ev[0].event_type,
> -					     DLB_QE_EV_TYPE_WORD);
> -		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
> -					     ev[1].sub_event_type << 8 |
> -						ev[1].event_type,
> -					     DLB_QE_EV_TYPE_WORD + 4);
> -		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
> -					     ev[2].sub_event_type << 8 |
> -						ev[2].event_type,
> -					     DLB_QE_EV_TYPE_WORD);
> -		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
> -					     ev[3].sub_event_type << 8 |
> -						ev[3].event_type,
> -					     DLB_QE_EV_TYPE_WORD + 4);
> -
> -		/* Store the metadata to memory (use the double-precision
> -		 * _mm_storeh_pd because there is no integer function for
> -		 * storing the upper 64b):
> -		 * qe[0] metadata = sse_qe[0][63:0]
> -		 * qe[1] metadata = sse_qe[0][127:64]
> -		 * qe[2] metadata = sse_qe[1][63:0]
> -		 * qe[3] metadata = sse_qe[1][127:64]
> -		 */
> -		_mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
> -		_mm_storeh_pd((double *)&qe[1].u.opaque_data,
> -			      (__m128d) sse_qe[0]);
> -		_mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
> -		_mm_storeh_pd((double *)&qe[3].u.opaque_data,
> -			      (__m128d) sse_qe[1]);
> -
> -		qe[0].data = ev[0].u64;
> -		qe[1].data = ev[1].u64;
> -		qe[2].data = ev[2].u64;
> -		qe[3].data = ev[3].u64;
> -
> -		break;
> -	case 3:
> -	case 2:
> -	case 1:
> -		for (i = 0; i < num; i++) {
> -			qe[i].cmd_byte =
> -				cmd_byte_map[qm_port->is_directed][ev[i].op];
> -			qe[i].sched_type = sched_type[i];
> -			qe[i].data = ev[i].u64;
> -			qe[i].qid = queue_id[i];
> -			qe[i].priority = EV_TO_DLB_PRIO(ev[i].priority);
> -			qe[i].lock_id = ev[i].flow_id;
> -			if (sched_type[i] == DLB_SCHED_DIRECTED) {
> -				struct dlb_msg_info *info =
> -					(struct dlb_msg_info *)&qe[i].lock_id;
> -
> -				info->qid = queue_id[i];
> -				info->sched_type = DLB_SCHED_DIRECTED;
> -				info->priority = qe[i].priority;
> -			}
> -			qe[i].u.event_type.major = ev[i].event_type;
> -			qe[i].u.event_type.sub = ev[i].sub_event_type;
> -		}
> -		break;
> -	case 0:
> -		break;
> -	}
> -}
> -
> -static inline void
> -dlb_construct_token_pop_qe(struct dlb_port *qm_port, int idx)
> -{
> -	struct dlb_cq_pop_qe *qe = (void *)qm_port->qe4;
> -	int num = qm_port->owed_tokens;
> -
> -	if (qm_port->use_rsvd_token_scheme) {
> -		/* Check if there's a deficit of reserved tokens, and return
> -		 * early if there are no (unreserved) tokens to consume.
> -		 */
> -		if (num <= qm_port->cq_rsvd_token_deficit) {
> -			qm_port->cq_rsvd_token_deficit -= num;
> -			qm_port->owed_tokens = 0;
> -			return;
> -		}
> -		num -= qm_port->cq_rsvd_token_deficit;
> -		qm_port->cq_rsvd_token_deficit = 0;
> -	}
> -
> -	qe[idx].cmd_byte = DLB_POP_CMD_BYTE;
> -	qe[idx].tokens = num - 1;
> -	qm_port->owed_tokens = 0;
> -}
> -
> -static __rte_always_inline void
> -dlb_pp_write(struct dlb_enqueue_qe *qe4,
> -	     struct process_local_port_data *port_data)
> -{
> -	dlb_movdir64b(port_data->pp_addr, qe4);
> -}
> -
> -static inline void
> -dlb_hw_do_enqueue(struct dlb_port *qm_port,
> -		  bool do_sfence,
> -		  struct process_local_port_data *port_data)
> -{
> -	DLB_LOG_DBG("dlb: Flushing QE(s) to DLB\n");
> -
> -	/* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
> -	 * application writes complete before enqueueing the release HCW.
> -	 */
> -	if (do_sfence)
> -		rte_wmb();
> -
> -	dlb_pp_write(qm_port->qe4, port_data);
> -}
> -
> -static inline int
> -dlb_consume_qe_immediate(struct dlb_port *qm_port, int num)
> -{
> -	struct process_local_port_data *port_data;
> -	struct dlb_cq_pop_qe *qe;
> -
> -	RTE_ASSERT(qm_port->config_state == DLB_CONFIGURED);
> -
> -	if (qm_port->use_rsvd_token_scheme) {
> -		/* Check if there's a deficit of reserved tokens, and return
> -		 * early if there are no (unreserved) tokens to consume.
> -		 */
> -		if (num <= qm_port->cq_rsvd_token_deficit) {
> -			qm_port->cq_rsvd_token_deficit -= num;
> -			qm_port->owed_tokens = 0;
> -			return 0;
> -		}
> -		num -= qm_port->cq_rsvd_token_deficit;
> -		qm_port->cq_rsvd_token_deficit = 0;
> -	}
> -
> -	qe = qm_port->consume_qe;
> -
> -	qe->tokens = num - 1;
> -	qe->int_arm = 0;
> -
> -	/* No store fence needed since no pointer is being sent, and CQ token
> -	 * pops can be safely reordered with other HCWs.
> -	 */
> -	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
> -
> -	dlb_movntdq_single(port_data->pp_addr, qe);
> -
> -	DLB_LOG_DBG("dlb: consume immediate - %d QEs\n", num);
> -
> -	qm_port->owed_tokens = 0;
> -
> -	return 0;
> -}
> -
> -static inline uint16_t
> -__dlb_event_enqueue_burst(void *event_port,
> -			  const struct rte_event events[],
> -			  uint16_t num,
> -			  bool use_delayed)
> -{
> -	struct dlb_eventdev_port *ev_port = event_port;
> -	struct dlb_port *qm_port = &ev_port->qm_port;
> -	struct process_local_port_data *port_data;
> -	int i;
> -
> -	RTE_ASSERT(ev_port->enq_configured);
> -	RTE_ASSERT(events != NULL);
> -
> -	rte_errno = 0;
> -	i = 0;
> -
> -	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
> -
> -	while (i < num) {
> -		uint8_t sched_types[DLB_NUM_QES_PER_CACHE_LINE];
> -		uint8_t queue_ids[DLB_NUM_QES_PER_CACHE_LINE];
> -		int pop_offs = 0;
> -		int j = 0;
> -
> -		memset(qm_port->qe4,
> -		       0,
> -		       DLB_NUM_QES_PER_CACHE_LINE *
> -		       sizeof(struct dlb_enqueue_qe));
> -
> -		for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
> -			const struct rte_event *ev = &events[i + j];
> -			int16_t thresh = qm_port->token_pop_thresh;
> -
> -			if (use_delayed &&
> -			    qm_port->token_pop_mode == DELAYED_POP &&
> -			    (ev->op == RTE_EVENT_OP_FORWARD ||
> -			     ev->op == RTE_EVENT_OP_RELEASE) &&
> -			    qm_port->issued_releases >= thresh - 1) {
> -				/* Insert the token pop QE and break out. This
> -				 * may result in a partial HCW, but that is
> -				 * simpler than supporting arbitrary QE
> -				 * insertion.
> -				 */
> -				dlb_construct_token_pop_qe(qm_port, j);
> -
> -				/* Reset the releases for the next QE batch */
> -				qm_port->issued_releases -= thresh;
> -
> -				/* When using delayed token pop mode, the
> -				 * initial token threshold is the full CQ
> -				 * depth. After the first token pop, we need to
> -				 * reset it to the dequeue_depth.
> -				 */
> -				qm_port->token_pop_thresh =
> -					qm_port->dequeue_depth;
> -
> -				pop_offs = 1;
> -				j++;
> -				break;
> -			}
> -
> -			if (dlb_event_enqueue_prep(ev_port, qm_port, ev,
> -						   port_data, &sched_types[j],
> -						   &queue_ids[j]))
> -				break;
> -		}
> -
> -		if (j == 0)
> -			break;
> -
> -		dlb_event_build_hcws(qm_port, &events[i], j - pop_offs,
> -				     sched_types, queue_ids);
> -
> -		dlb_hw_do_enqueue(qm_port, i == 0, port_data);
> -
> -		/* Don't include the token pop QE in the enqueue count */
> -		i += j - pop_offs;
> -
> -		/* Don't interpret j < DLB_NUM_... as out-of-credits if
> -		 * pop_offs != 0
> -		 */
> -		if (j < DLB_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
> -			break;
> -	}
> -
> -	RTE_ASSERT(!((i == 0 && rte_errno != -ENOSPC)));
> -
> -	return i;
> -}
> -
> -static inline uint16_t
> -dlb_event_enqueue_burst(void *event_port,
> -			const struct rte_event events[],
> -			uint16_t num)
> -{
> -	return __dlb_event_enqueue_burst(event_port, events, num, false);
> -}
> -
> -static inline uint16_t
> -dlb_event_enqueue_burst_delayed(void *event_port,
> -				const struct rte_event events[],
> -				uint16_t num)
> -{
> -	return __dlb_event_enqueue_burst(event_port, events, num, true);
> -}
> -
> -static inline uint16_t
> -dlb_event_enqueue(void *event_port,
> -		  const struct rte_event events[])
> -{
> -	return __dlb_event_enqueue_burst(event_port, events, 1, false);
> -}
> -
> -static inline uint16_t
> -dlb_event_enqueue_delayed(void *event_port,
> -			  const struct rte_event events[])
> -{
> -	return __dlb_event_enqueue_burst(event_port, events, 1, true);
> -}
> -
> -static uint16_t
> -dlb_event_enqueue_new_burst(void *event_port,
> -			    const struct rte_event events[],
> -			    uint16_t num)
> -{
> -	return __dlb_event_enqueue_burst(event_port, events, num, false);
> -}
> -
> -static uint16_t
> -dlb_event_enqueue_new_burst_delayed(void *event_port,
> -				    const struct rte_event events[],
> -				    uint16_t num)
> -{
> -	return __dlb_event_enqueue_burst(event_port, events, num, true);
> -}
> -
> -static uint16_t
> -dlb_event_enqueue_forward_burst(void *event_port,
> -				const struct rte_event events[],
> -				uint16_t num)
> -{
> -	return __dlb_event_enqueue_burst(event_port, events, num, false);
> -}
> -
> -static uint16_t
> -dlb_event_enqueue_forward_burst_delayed(void *event_port,
> -					const struct rte_event events[],
> -					uint16_t num)
> -{
> -	return __dlb_event_enqueue_burst(event_port, events, num, true);
> -}
> -
> -static __rte_always_inline int
> -dlb_recv_qe(struct dlb_port *qm_port, struct dlb_dequeue_qe *qe,
> -	    uint8_t *offset)
> -{
> -	uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
> -				   {0x00, 0x01, 0x03, 0x07} };
> -	uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
> -	volatile struct dlb_dequeue_qe *cq_addr;
> -	__m128i *qes = (__m128i *)qe;
> -	uint64_t *cache_line_base;
> -	uint8_t gen_bits;
> -
> -	cq_addr = dlb_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
> -	cq_addr = &cq_addr[qm_port->cq_idx];
> -
> -	cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
> -	*offset = ((uintptr_t)cq_addr & 0x30) >> 4;
> -
> -	/* Load the next CQ cache line from memory. Pack these reads as tight
> -	 * as possible to reduce the chance that DLB invalidates the line while
> -	 * the CPU is reading it. Read the cache line backwards to ensure that
> -	 * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
> -	 *
> -	 * (Valid QEs start at &qe[offset])
> -	 */
> -	qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
> -	qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
> -	qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
> -	qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
> -
> -	/* Evict the cache line ASAP */
> -	rte_cldemote(cache_line_base);
> -
> -	/* Extract and combine the gen bits */
> -	gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
> -		   ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
> -		   ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
> -		   ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
> -
> -	/* XOR the combined bits such that a 1 represents a valid QE */
> -	gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
> -
> -	/* Mask off gen bits we don't care about */
> -	gen_bits &= and_mask[*offset];
> -
> -	return __builtin_popcount(gen_bits);
> -}
> -
> -static inline void
> -dlb_inc_cq_idx(struct dlb_port *qm_port, int cnt)
> -{
> -	uint16_t idx = qm_port->cq_idx_unmasked + cnt;
> -
> -	qm_port->cq_idx_unmasked = idx;
> -	qm_port->cq_idx = idx & qm_port->cq_depth_mask;
> -	qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
> -}
> -
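The receive path above detects valid completion-queue entries with a generation ("gen") bit instead of a producer index shared with the device: hardware writes cq_gen into each slot it fills, and software flips the value it expects every time cq_idx wraps, so a slot whose gen bit still carries the previous pass's value is known to be stale. A minimal scalar sketch of that scheme, using hypothetical names rather than the PMD's structures:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical ring entry: the device writes 'gen' when it fills a slot. */
struct cq_entry {
	uint64_t data;
	uint8_t gen;
};

struct cq_state {
	struct cq_entry *ring;
	uint16_t idx;         /* next slot to inspect */
	uint16_t depth_mask;  /* ring depth - 1, depth is a power of two */
	uint8_t expected_gen; /* value the device writes on the current pass */
};

/* Pop one entry if the next slot holds a fresh QE; otherwise leave state alone. */
static bool cq_try_pop(struct cq_state *cq, uint64_t *data)
{
	struct cq_entry *e = &cq->ring[cq->idx];

	if (e->gen != cq->expected_gen)
		return false; /* slot not yet rewritten by the device */

	*data = e->data;

	cq->idx = (cq->idx + 1) & cq->depth_mask;
	if (cq->idx == 0)
		cq->expected_gen ^= 1; /* wrapped: expect the flipped bit */

	return true;
}

The vectorized dlb_recv_qe() above applies the same idea to four entries at once: it gathers the four cq_gen bits, XORs them against a mask derived from the current gen bit and cache-line offset, and popcounts the result to learn how many leading entries are valid.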
> -static inline int
> -dlb_process_dequeue_qes(struct dlb_eventdev_port *ev_port,
> -			struct dlb_port *qm_port,
> -			struct rte_event *events,
> -			struct dlb_dequeue_qe *qes,
> -			int cnt)
> -{
> -	uint8_t *qid_mappings = qm_port->qid_mappings;
> -	int i, num;
> -
> -	RTE_SET_USED(ev_port);  /* avoids unused variable error */
> -
> -	for (i = 0, num = 0; i < cnt; i++) {
> -		struct dlb_dequeue_qe *qe = &qes[i];
> -		int sched_type_map[4] = {
> -			[DLB_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
> -			[DLB_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
> -			[DLB_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
> -			[DLB_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
> -		};
> -
> -		DLB_LOG_DBG("dequeue success, data = 0x%llx, qid=%d, event_type=%d, subevent=%d\npp_id = %d, sched_type = %d, qid = %d, err=%d\n",
> -			    (long long)qe->data, qe->qid,
> -			    qe->u.event_type.major,
> -			    qe->u.event_type.sub,
> -			    qe->pp_id, qe->sched_type, qe->qid, qe->error);
> -
> -		/* Fill in event information.
> -		 * Note that flow_id must be embedded in the data by
> -		 * the app, such as the mbuf RSS hash field if the data
> -		 * buffer is a mbuf.
> -		 */
> -		if (unlikely(qe->error)) {
> -			DLB_LOG_ERR("QE error bit ON\n");
> -			DLB_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
> -			dlb_consume_qe_immediate(qm_port, 1);
> -			continue; /* Ignore */
> -		}
> -
> -		events[num].u64 = qe->data;
> -		events[num].queue_id = qid_mappings[qe->qid];
> -		events[num].priority = DLB_TO_EV_PRIO((uint8_t)qe->priority);
> -		events[num].event_type = qe->u.event_type.major;
> -		events[num].sub_event_type = qe->u.event_type.sub;
> -		events[num].sched_type = sched_type_map[qe->sched_type];
> -		DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
> -		num++;
> -	}
> -	DLB_INC_STAT(ev_port->stats.traffic.rx_ok, num);
> -
> -	return num;
> -}
> -
> -static inline int
> -dlb_process_dequeue_four_qes(struct dlb_eventdev_port *ev_port,
> -			     struct dlb_port *qm_port,
> -			     struct rte_event *events,
> -			     struct dlb_dequeue_qe *qes)
> -{
> -	int sched_type_map[] = {
> -		[DLB_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
> -		[DLB_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
> -		[DLB_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
> -		[DLB_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
> -	};
> -	const int num_events = DLB_NUM_QES_PER_CACHE_LINE;
> -	uint8_t *qid_mappings = qm_port->qid_mappings;
> -	__m128i sse_evt[2];
> -	int i;
> -
> -	/* In the unlikely case that any of the QE error bits are set, process
> -	 * them one at a time.
> -	 */
> -	if (unlikely(qes[0].error || qes[1].error ||
> -		     qes[2].error || qes[3].error))
> -		return dlb_process_dequeue_qes(ev_port, qm_port, events,
> -					       qes, num_events);
> -
> -	for (i = 0; i < DLB_NUM_QES_PER_CACHE_LINE; i++) {
> -		DLB_LOG_DBG("dequeue success, data = 0x%llx, qid=%d, event_type=%d, subevent=%d\npp_id = %d, sched_type = %d, qid = %d, err=%d\n",
> -			    (long long)qes[i].data, qes[i].qid,
> -			    qes[i].u.event_type.major,
> -			    qes[i].u.event_type.sub,
> -			    qes[i].pp_id, qes[i].sched_type, qes[i].qid,
> -			    qes[i].error);
> -	}
> -
> -	events[0].u64 = qes[0].data;
> -	events[1].u64 = qes[1].data;
> -	events[2].u64 = qes[2].data;
> -	events[3].u64 = qes[3].data;
> -
> -	/* Construct the metadata portion of two struct rte_events
> -	 * in one 128b SSE register. Event metadata is constructed in the SSE
> -	 * registers like so:
> -	 * sse_evt[0][63:0]:   event[0]'s metadata
> -	 * sse_evt[0][127:64]: event[1]'s metadata
> -	 * sse_evt[1][63:0]:   event[2]'s metadata
> -	 * sse_evt[1][127:64]: event[3]'s metadata
> -	 */
> -	sse_evt[0] = _mm_setzero_si128();
> -	sse_evt[1] = _mm_setzero_si128();
> -
> -	/* Convert the hardware queue ID to an event queue ID and store it in
> -	 * the metadata:
> -	 * sse_evt[0][47:40]   = qid_mappings[qes[0].qid]
> -	 * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
> -	 * sse_evt[1][47:40]   = qid_mappings[qes[2].qid]
> -	 * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
> -	 */
> -#define DLB_EVENT_QUEUE_ID_BYTE 5
> -	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
> -				     qid_mappings[qes[0].qid],
> -				     DLB_EVENT_QUEUE_ID_BYTE);
> -	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
> -				     qid_mappings[qes[1].qid],
> -				     DLB_EVENT_QUEUE_ID_BYTE + 8);
> -	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
> -				     qid_mappings[qes[2].qid],
> -				     DLB_EVENT_QUEUE_ID_BYTE);
> -	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
> -				     qid_mappings[qes[3].qid],
> -				     DLB_EVENT_QUEUE_ID_BYTE + 8);
> -
> -	/* Convert the hardware priority to an event priority and store it in
> -	 * the metadata:
> -	 * sse_evt[0][55:48]   = DLB_TO_EV_PRIO(qes[0].priority)
> -	 * sse_evt[0][119:112] = DLB_TO_EV_PRIO(qes[1].priority)
> -	 * sse_evt[1][55:48]   = DLB_TO_EV_PRIO(qes[2].priority)
> -	 * sse_evt[1][119:112] = DLB_TO_EV_PRIO(qes[3].priority)
> -	 */
> -#define DLB_EVENT_PRIO_BYTE 6
> -	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
> -				     DLB_TO_EV_PRIO((uint8_t)qes[0].priority),
> -				     DLB_EVENT_PRIO_BYTE);
> -	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
> -				     DLB_TO_EV_PRIO((uint8_t)qes[1].priority),
> -				     DLB_EVENT_PRIO_BYTE + 8);
> -	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
> -				     DLB_TO_EV_PRIO((uint8_t)qes[2].priority),
> -				     DLB_EVENT_PRIO_BYTE);
> -	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
> -				     DLB_TO_EV_PRIO((uint8_t)qes[3].priority),
> -				     DLB_EVENT_PRIO_BYTE + 8);
> -
> -	/* Write the event type and sub event type to the event metadata. Leave
> -	 * flow ID unspecified, since the hardware does not maintain it during
> -	 * scheduling:
> -	 * sse_evt[0][31:0]   = qes[0].u.event_type.major << 28 |
> -	 *			qes[0].u.event_type.sub << 20;
> -	 * sse_evt[0][95:64]  = qes[1].u.event_type.major << 28 |
> -	 *			qes[1].u.event_type.sub << 20;
> -	 * sse_evt[1][31:0]   = qes[2].u.event_type.major << 28 |
> -	 *			qes[2].u.event_type.sub << 20;
> -	 * sse_evt[1][95:64]  = qes[3].u.event_type.major << 28 |
> -	 *			qes[3].u.event_type.sub << 20;
> -	 */
> -#define DLB_EVENT_EV_TYPE_DW 0
> -#define DLB_EVENT_EV_TYPE_SHIFT 28
> -#define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
> -	sse_evt[0] = _mm_insert_epi32(sse_evt[0],
> -			qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
> -			qes[0].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
> -			DLB_EVENT_EV_TYPE_DW);
> -	sse_evt[0] = _mm_insert_epi32(sse_evt[0],
> -			qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
> -			qes[1].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
> -			DLB_EVENT_EV_TYPE_DW + 2);
> -	sse_evt[1] = _mm_insert_epi32(sse_evt[1],
> -			qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
> -			qes[2].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
> -			DLB_EVENT_EV_TYPE_DW);
> -	sse_evt[1] = _mm_insert_epi32(sse_evt[1],
> -			qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT  |
> -			qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
> -			DLB_EVENT_EV_TYPE_DW + 2);
> -
> -	/* Write the sched type to the event metadata. 'op' and 'rsvd' are not
> -	 * set:
> -	 * sse_evt[0][39:32]  = sched_type_map[qes[0].sched_type] << 6
> -	 * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
> -	 * sse_evt[1][39:32]  = sched_type_map[qes[2].sched_type] << 6
> -	 * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
> -	 */
> -#define DLB_EVENT_SCHED_TYPE_BYTE 4
> -#define DLB_EVENT_SCHED_TYPE_SHIFT 6
> -	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
> -		sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
> -		DLB_EVENT_SCHED_TYPE_BYTE);
> -	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
> -		sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
> -		DLB_EVENT_SCHED_TYPE_BYTE + 8);
> -	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
> -		sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
> -		DLB_EVENT_SCHED_TYPE_BYTE);
> -	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
> -		sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
> -		DLB_EVENT_SCHED_TYPE_BYTE + 8);
> -
> -	/* Store the metadata to the event (use the double-precision
> -	 * _mm_storeh_pd because there is no integer function for storing the
> -	 * upper 64b):
> -	 * events[0].event = sse_evt[0][63:0]
> -	 * events[1].event = sse_evt[0][127:64]
> -	 * events[2].event = sse_evt[1][63:0]
> -	 * events[3].event = sse_evt[1][127:64]
> -	 */
> -	_mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
> -	_mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
> -	_mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
> -	_mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
> -
> -	DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
> -	DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
> -	DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
> -	DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
> -
> -	DLB_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
> -
> -	return num_events;
> -}
> -
> -static inline int
> -dlb_dequeue_wait(struct dlb_eventdev *dlb,
> -		 struct dlb_eventdev_port *ev_port,
> -		 struct dlb_port *qm_port,
> -		 uint64_t timeout,
> -		 uint64_t start_ticks)
> -{
> -	struct process_local_port_data *port_data;
> -	uint64_t elapsed_ticks;
> -
> -	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
> -
> -	elapsed_ticks = rte_get_timer_cycles() - start_ticks;
> -
> -	/* Wait/poll time expired */
> -	if (elapsed_ticks >= timeout) {
> -		/* Interrupts not supported by PF PMD */
> -		return 1;
> -	} else if (dlb->umwait_allowed) {
> -		struct rte_power_monitor_cond pmc;
> -		volatile struct dlb_dequeue_qe *cq_base;
> -		union {
> -			uint64_t raw_qe[2];
> -			struct dlb_dequeue_qe qe;
> -		} qe_mask;
> -		uint64_t expected_value;
> -		volatile uint64_t *monitor_addr;
> -
> -		qe_mask.qe.cq_gen = 1; /* set mask */
> -
> -		cq_base = port_data->cq_base;
> -		monitor_addr = (volatile uint64_t *)(volatile void *)
> -			&cq_base[qm_port->cq_idx];
> -		monitor_addr++; /* cq_gen bit is in second 64bit location */
> -
> -		if (qm_port->gen_bit)
> -			expected_value = qe_mask.raw_qe[1];
> -		else
> -			expected_value = 0;
> -
> -		pmc.addr = monitor_addr;
> -		pmc.val = expected_value;
> -		pmc.mask = qe_mask.raw_qe[1];
> -		pmc.size = sizeof(uint64_t);
> -
> -		rte_power_monitor(&pmc, timeout + start_ticks);
> -
> -		DLB_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
> -	} else {
> -		uint64_t poll_interval = RTE_LIBRTE_PMD_DLB_POLL_INTERVAL;
> -		uint64_t curr_ticks = rte_get_timer_cycles();
> -		uint64_t init_ticks = curr_ticks;
> -
> -		while ((curr_ticks - start_ticks < timeout) &&
> -		       (curr_ticks - init_ticks < poll_interval))
> -			curr_ticks = rte_get_timer_cycles();
> -	}
> -
> -	return 0;
> -}
> -
> -static inline int16_t
> -dlb_hw_dequeue(struct dlb_eventdev *dlb,
> -	       struct dlb_eventdev_port *ev_port,
> -	       struct rte_event *events,
> -	       uint16_t max_num,
> -	       uint64_t dequeue_timeout_ticks)
> -{
> -	uint64_t timeout;
> -	uint64_t start_ticks = 0ULL;
> -	struct dlb_port *qm_port;
> -	int num = 0;
> -
> -	qm_port = &ev_port->qm_port;
> -
> -	/* If configured for per dequeue wait, then use wait value provided
> -	 * to this API. Otherwise we must use the global
> -	 * value from eventdev config time.
> -	 */
> -	if (!dlb->global_dequeue_wait)
> -		timeout = dequeue_timeout_ticks;
> -	else
> -		timeout = dlb->global_dequeue_wait_ticks;
> -
> -	if (timeout)
> -		start_ticks = rte_get_timer_cycles();
> -
> -	while (num < max_num) {
> -		struct dlb_dequeue_qe qes[DLB_NUM_QES_PER_CACHE_LINE];
> -		uint8_t offset;
> -		int num_avail;
> -
> -		/* Copy up to 4 QEs from the current cache line into qes */
> -		num_avail = dlb_recv_qe(qm_port, qes, &offset);
> -
> -		/* But don't process more than the user requested */
> -		num_avail = RTE_MIN(num_avail, max_num - num);
> -
> -		dlb_inc_cq_idx(qm_port, num_avail);
> -
> -		if (num_avail == DLB_NUM_QES_PER_CACHE_LINE)
> -			num += dlb_process_dequeue_four_qes(ev_port,
> -							     qm_port,
> -							     &events[num],
> -							     &qes[offset]);
> -		else if (num_avail)
> -			num += dlb_process_dequeue_qes(ev_port,
> -							qm_port,
> -							&events[num],
> -							&qes[offset],
> -							num_avail);
> -		else if ((timeout == 0) || (num > 0))
> -			/* Not waiting in any form, or 1+ events received? */
> -			break;
> -		else if (dlb_dequeue_wait(dlb, ev_port, qm_port,
> -					  timeout, start_ticks))
> -			break;
> -	}
> -
> -	qm_port->owed_tokens += num;
> -
> -	if (num && qm_port->token_pop_mode == AUTO_POP)
> -		dlb_consume_qe_immediate(qm_port, num);
> -
> -	ev_port->outstanding_releases += num;
> -
> -	return num;
> -}
> -
> -static __rte_always_inline int
> -dlb_recv_qe_sparse(struct dlb_port *qm_port, struct dlb_dequeue_qe *qe)
> -{
> -	volatile struct dlb_dequeue_qe *cq_addr;
> -	uint8_t xor_mask[2] = {0x0F, 0x00};
> -	const uint8_t and_mask = 0x0F;
> -	__m128i *qes = (__m128i *)qe;
> -	uint8_t gen_bits, gen_bit;
> -	uintptr_t addr[4];
> -	uint16_t idx;
> -
> -	cq_addr = dlb_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
> -
> -	idx = qm_port->cq_idx;
> -
> -	/* Load the next 4 QEs */
> -	addr[0] = (uintptr_t)&cq_addr[idx];
> -	addr[1] = (uintptr_t)&cq_addr[(idx +  4) & qm_port->cq_depth_mask];
> -	addr[2] = (uintptr_t)&cq_addr[(idx +  8) & qm_port->cq_depth_mask];
> -	addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
> -
> -	/* Prefetch next batch of QEs (all CQs occupy minimum 8 cache lines) */
> -	rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
> -	rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
> -	rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
> -	rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
> -
> -	/* Correct the xor_mask for wrap-around QEs */
> -	gen_bit = qm_port->gen_bit;
> -	xor_mask[gen_bit] ^= !!((idx +  4) > qm_port->cq_depth_mask) << 1;
> -	xor_mask[gen_bit] ^= !!((idx +  8) > qm_port->cq_depth_mask) << 2;
> -	xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
> -
> -	/* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
> -	 * valid, then QEs[0:N-1] are too.
> -	 */
> -	qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
> -	rte_compiler_barrier();
> -	qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
> -	rte_compiler_barrier();
> -	qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
> -	rte_compiler_barrier();
> -	qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
> -
> -	/* Extract and combine the gen bits */
> -	gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
> -		   ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
> -		   ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
> -		   ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
> -
> -	/* XOR the combined bits such that a 1 represents a valid QE */
> -	gen_bits ^= xor_mask[gen_bit];
> -
> -	/* Mask off gen bits we don't care about */
> -	gen_bits &= and_mask;
> -
> -	return __builtin_popcount(gen_bits);
> -}
> -
> -static inline int16_t
> -dlb_hw_dequeue_sparse(struct dlb_eventdev *dlb,
> -		      struct dlb_eventdev_port *ev_port,
> -		      struct rte_event *events,
> -		      uint16_t max_num,
> -		      uint64_t dequeue_timeout_ticks)
> -{
> -	uint64_t timeout;
> -	uint64_t start_ticks = 0ULL;
> -	struct dlb_port *qm_port;
> -	int num = 0;
> -
> -	qm_port = &ev_port->qm_port;
> -
> -	/* If configured for per dequeue wait, then use wait value provided
> -	 * to this API. Otherwise we must use the global
> -	 * value from eventdev config time.
> -	 */
> -	if (!dlb->global_dequeue_wait)
> -		timeout = dequeue_timeout_ticks;
> -	else
> -		timeout = dlb->global_dequeue_wait_ticks;
> -
> -	if (timeout)
> -		start_ticks = rte_get_timer_cycles();
> -
> -	while (num < max_num) {
> -		struct dlb_dequeue_qe qes[DLB_NUM_QES_PER_CACHE_LINE];
> -		int num_avail;
> -
> -		/* Copy up to 4 QEs from the current cache line into qes */
> -		num_avail = dlb_recv_qe_sparse(qm_port, qes);
> -
> -		/* But don't process more than the user requested */
> -		num_avail = RTE_MIN(num_avail, max_num - num);
> -
> -		dlb_inc_cq_idx(qm_port, num_avail << 2);
> -
> -		if (num_avail == DLB_NUM_QES_PER_CACHE_LINE)
> -			num += dlb_process_dequeue_four_qes(ev_port,
> -							     qm_port,
> -							     &events[num],
> -							     &qes[0]);
> -		else if (num_avail)
> -			num += dlb_process_dequeue_qes(ev_port,
> -							qm_port,
> -							&events[num],
> -							&qes[0],
> -							num_avail);
> -		else if ((timeout == 0) || (num > 0))
> -			/* Not waiting in any form, or 1+ events received? */
> -			break;
> -		else if (dlb_dequeue_wait(dlb, ev_port, qm_port,
> -					  timeout, start_ticks))
> -			break;
> -	}
> -
> -	qm_port->owed_tokens += num;
> -
> -	if (num && qm_port->token_pop_mode == AUTO_POP)
> -		dlb_consume_qe_immediate(qm_port, num);
> -
> -	ev_port->outstanding_releases += num;
> -
> -	return num;
> -}
> -
> -static int
> -dlb_event_release(struct dlb_eventdev *dlb, uint8_t port_id, int n)
> -{
> -	struct process_local_port_data *port_data;
> -	struct dlb_eventdev_port *ev_port;
> -	struct dlb_port *qm_port;
> -	int i;
> -
> -	if (port_id > dlb->num_ports) {
> -		DLB_LOG_ERR("Invalid port id %d in dlb-event_release\n",
> -			    port_id);
> -		rte_errno = -EINVAL;
> -		return rte_errno;
> -	}
> -
> -	ev_port = &dlb->ev_ports[port_id];
> -	qm_port = &ev_port->qm_port;
> -	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
> -
> -	i = 0;
> -
> -	if (qm_port->is_directed) {
> -		i = n;
> -		goto sw_credit_update;
> -	}
> -
> -	while (i < n) {
> -		int pop_offs = 0;
> -		int j = 0;
> -
> -		/* Zero-out QEs */
> -		qm_port->qe4[0].cmd_byte = 0;
> -		qm_port->qe4[1].cmd_byte = 0;
> -		qm_port->qe4[2].cmd_byte = 0;
> -		qm_port->qe4[3].cmd_byte = 0;
> -
> -		for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
> -			int16_t thresh = qm_port->token_pop_thresh;
> -
> -			if (qm_port->token_pop_mode == DELAYED_POP &&
> -			    qm_port->issued_releases >= thresh - 1) {
> -				/* Insert the token pop QE */
> -				dlb_construct_token_pop_qe(qm_port, j);
> -
> -				/* Reset the releases for the next QE batch */
> -				qm_port->issued_releases -= thresh;
> -
> -				/* When using delayed token pop mode, the
> -				 * initial token threshold is the full CQ
> -				 * depth. After the first token pop, we need to
> -				 * reset it to the dequeue_depth.
> -				 */
> -				qm_port->token_pop_thresh =
> -					qm_port->dequeue_depth;
> -
> -				pop_offs = 1;
> -				j++;
> -				break;
> -			}
> -
> -			qm_port->qe4[j].cmd_byte = DLB_COMP_CMD_BYTE;
> -			qm_port->issued_releases++;
> -		}
> -
> -		dlb_hw_do_enqueue(qm_port, i == 0, port_data);
> -
> -		/* Don't include the token pop QE in the release count */
> -		i += j - pop_offs;
> -	}
> -
> -sw_credit_update:
> -	/* each release returns one credit */
> -	if (!ev_port->outstanding_releases) {
> -		DLB_LOG_ERR("Unrecoverable application error. Outstanding releases underflowed.\n");
> -		rte_errno = -ENOTRECOVERABLE;
> -		return rte_errno;
> -	}
> -
> -	ev_port->outstanding_releases -= i;
> -	ev_port->inflight_credits += i;
> -
> -	/* Replenish s/w credits if enough releases are performed */
> -	dlb_replenish_sw_credits(dlb, ev_port);
> -	return 0;
> -}
> -
> -static uint16_t
> -dlb_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
> -			uint64_t wait)
> -{
> -	struct dlb_eventdev_port *ev_port = event_port;
> -	struct dlb_port *qm_port = &ev_port->qm_port;
> -	struct dlb_eventdev *dlb = ev_port->dlb;
> -	uint16_t cnt;
> -	int ret;
> -
> -	rte_errno = 0;
> -
> -	RTE_ASSERT(ev_port->setup_done);
> -	RTE_ASSERT(ev != NULL);
> -
> -	if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
> -		uint16_t out_rels = ev_port->outstanding_releases;
> -
> -		ret = dlb_event_release(dlb, ev_port->id, out_rels);
> -		if (ret)
> -			return(ret);
> -
> -		DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
> -	}
> -
> -	if (qm_port->token_pop_mode == DEFERRED_POP &&
> -			qm_port->owed_tokens)
> -		dlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);
> -
> -	cnt = dlb_hw_dequeue(dlb, ev_port, ev, num, wait);
> -
> -	DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
> -	DLB_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
> -	return cnt;
> -}
> -
> -static uint16_t
> -dlb_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
> -{
> -	return dlb_event_dequeue_burst(event_port, ev, 1, wait);
> -}
> -
> -static uint16_t
> -dlb_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
> -			       uint16_t num, uint64_t wait)
> -{
> -	struct dlb_eventdev_port *ev_port = event_port;
> -	struct dlb_port *qm_port = &ev_port->qm_port;
> -	struct dlb_eventdev *dlb = ev_port->dlb;
> -	uint16_t cnt;
> -	int ret;
> -
> -	rte_errno = 0;
> -
> -	RTE_ASSERT(ev_port->setup_done);
> -	RTE_ASSERT(ev != NULL);
> -
> -	if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
> -		uint16_t out_rels = ev_port->outstanding_releases;
> -
> -		ret = dlb_event_release(dlb, ev_port->id, out_rels);
> -		if (ret)
> -			return(ret);
> -
> -		DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
> -	}
> -
> -	if (qm_port->token_pop_mode == DEFERRED_POP &&
> -	    qm_port->owed_tokens)
> -		dlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);
> -
> -	cnt = dlb_hw_dequeue_sparse(dlb, ev_port, ev, num, wait);
> -
> -	DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
> -	DLB_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
> -	return cnt;
> -}
> -
> -static uint16_t
> -dlb_event_dequeue_sparse(void *event_port, struct rte_event *ev, uint64_t wait)
> -{
> -	return dlb_event_dequeue_burst_sparse(event_port, ev, 1, wait);
> -}
> -
> -static uint32_t
> -dlb_get_ldb_queue_depth(struct dlb_eventdev *dlb,
> -			struct dlb_eventdev_queue *queue)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_get_ldb_queue_depth_args cfg;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	cfg.queue_id = queue->qm_queue.id;
> -	cfg.response = (uintptr_t)&response;
> -
> -	ret = dlb_iface_get_ldb_queue_depth(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: get_ldb_queue_depth ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return ret;
> -	}
> -
> -	return response.id;
> -}
> -
> -static uint32_t
> -dlb_get_dir_queue_depth(struct dlb_eventdev *dlb,
> -			struct dlb_eventdev_queue *queue)
> -{
> -	struct dlb_hw_dev *handle = &dlb->qm_instance;
> -	struct dlb_get_dir_queue_depth_args cfg;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	cfg.queue_id = queue->qm_queue.id;
> -	cfg.response = (uintptr_t)&response;
> -
> -	ret = dlb_iface_get_dir_queue_depth(handle, &cfg);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb: get_dir_queue_depth ret=%d (driver status: %s)\n",
> -			    ret, dlb_error_strings[response.status]);
> -		return ret;
> -	}
> -
> -	return response.id;
> -}
> -
> -uint32_t
> -dlb_get_queue_depth(struct dlb_eventdev *dlb,
> -		    struct dlb_eventdev_queue *queue)
> -{
> -	if (queue->qm_queue.is_directed)
> -		return dlb_get_dir_queue_depth(dlb, queue);
> -	else
> -		return dlb_get_ldb_queue_depth(dlb, queue);
> -}
> -
> -static bool
> -dlb_queue_is_empty(struct dlb_eventdev *dlb,
> -		   struct dlb_eventdev_queue *queue)
> -{
> -	return dlb_get_queue_depth(dlb, queue) == 0;
> -}
> -
> -static bool
> -dlb_linked_queues_empty(struct dlb_eventdev *dlb)
> -{
> -	int i;
> -
> -	for (i = 0; i < dlb->num_queues; i++) {
> -		if (dlb->ev_queues[i].num_links == 0)
> -			continue;
> -		if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
> -			return false;
> -	}
> -
> -	return true;
> -}
> -
> -static bool
> -dlb_queues_empty(struct dlb_eventdev *dlb)
> -{
> -	int i;
> -
> -	for (i = 0; i < dlb->num_queues; i++) {
> -		if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
> -			return false;
> -	}
> -
> -	return true;
> -}
> -
> -static void
> -dlb_flush_port(struct rte_eventdev *dev, int port_id)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	eventdev_stop_flush_t flush;
> -	struct rte_event ev;
> -	uint8_t dev_id;
> -	void *arg;
> -	int i;
> -
> -	flush = dev->dev_ops->dev_stop_flush;
> -	dev_id = dev->data->dev_id;
> -	arg = dev->data->dev_stop_flush_arg;
> -
> -	while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
> -		if (flush)
> -			flush(dev_id, ev, arg);
> -
> -		if (dlb->ev_ports[port_id].qm_port.is_directed)
> -			continue;
> -
> -		ev.op = RTE_EVENT_OP_RELEASE;
> -
> -		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
> -	}
> -
> -	/* Enqueue any additional outstanding releases */
> -	ev.op = RTE_EVENT_OP_RELEASE;
> -
> -	for (i = dlb->ev_ports[port_id].outstanding_releases; i > 0; i--)
> -		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
> -}
> -
> -static void
> -dlb_drain(struct rte_eventdev *dev)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	struct dlb_eventdev_port *ev_port = NULL;
> -	uint8_t dev_id;
> -	int i;
> -
> -	dev_id = dev->data->dev_id;
> -
> -	while (!dlb_linked_queues_empty(dlb)) {
> -		/* Flush all the ev_ports, which will drain all their connected
> -		 * queues.
> -		 */
> -		for (i = 0; i < dlb->num_ports; i++)
> -			dlb_flush_port(dev, i);
> -	}
> -
> -	/* The queues are empty, but there may be events left in the ports. */
> -	for (i = 0; i < dlb->num_ports; i++)
> -		dlb_flush_port(dev, i);
> -
> -	/* If the domain's queues are empty, we're done. */
> -	if (dlb_queues_empty(dlb))
> -		return;
> -
> -	/* Else, there must be at least one unlinked load-balanced queue.
> -	 * Select a load-balanced port with which to drain the unlinked
> -	 * queue(s).
> -	 */
> -	for (i = 0; i < dlb->num_ports; i++) {
> -		ev_port = &dlb->ev_ports[i];
> -
> -		if (!ev_port->qm_port.is_directed)
> -			break;
> -	}
> -
> -	if (i == dlb->num_ports) {
> -		DLB_LOG_ERR("internal error: no LDB ev_ports\n");
> -		return;
> -	}
> -
> -	rte_errno = 0;
> -	rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
> -
> -	if (rte_errno) {
> -		DLB_LOG_ERR("internal error: failed to unlink ev_port %d\n",
> -			    ev_port->id);
> -		return;
> -	}
> -
> -	for (i = 0; i < dlb->num_queues; i++) {
> -		uint8_t qid, prio;
> -		int ret;
> -
> -		if (dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
> -			continue;
> -
> -		qid = i;
> -		prio = 0;
> -
> -		/* Link the ev_port to the queue */
> -		ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
> -		if (ret != 1) {
> -			DLB_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
> -				    ev_port->id, qid);
> -			return;
> -		}
> -
> -		/* Flush the queue */
> -		while (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
> -			dlb_flush_port(dev, ev_port->id);
> -
> -		/* Drain any extant events in the ev_port. */
> -		dlb_flush_port(dev, ev_port->id);
> -
> -		/* Unlink the ev_port from the queue */
> -		ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
> -		if (ret != 1) {
> -			DLB_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n",
> -				    ev_port->id, qid);
> -			return;
> -		}
> -	}
> -}
> -
> -static void
> -dlb_eventdev_stop(struct rte_eventdev *dev)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -
> -	rte_spinlock_lock(&dlb->qm_instance.resource_lock);
> -
> -	if (dlb->run_state == DLB_RUN_STATE_STOPPED) {
> -		DLB_LOG_DBG("Internal error: already stopped\n");
> -		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
> -		return;
> -	} else if (dlb->run_state != DLB_RUN_STATE_STARTED) {
> -		DLB_LOG_ERR("Internal error: bad state %d for dev_stop\n",
> -			    (int)dlb->run_state);
> -		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
> -		return;
> -	}
> -
> -	dlb->run_state = DLB_RUN_STATE_STOPPING;
> -
> -	rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
> -
> -	dlb_drain(dev);
> -
> -	dlb->run_state = DLB_RUN_STATE_STOPPED;
> -}
> -
> -static int
> -dlb_eventdev_close(struct rte_eventdev *dev)
> -{
> -	dlb_hw_reset_sched_domain(dev, false);
> -
> -	return 0;
> -}
> -
> -static void
> -dlb_eventdev_port_release(void *port)
> -{
> -	struct dlb_eventdev_port *ev_port = port;
> -
> -	if (ev_port) {
> -		struct dlb_port *qm_port = &ev_port->qm_port;
> -
> -		if (qm_port->config_state == DLB_CONFIGURED)
> -			dlb_free_qe_mem(qm_port);
> -	}
> -}
> -
> -static void
> -dlb_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
> -{
> -	RTE_SET_USED(dev);
> -	RTE_SET_USED(id);
> -
> -	/* This function intentionally left blank. */
> -}
> -
> -static int
> -dlb_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
> -			   uint64_t *timeout_ticks)
> -{
> -	RTE_SET_USED(dev);
> -	uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
> -
> -	*timeout_ticks = ns * cycles_per_ns;
> -
> -	return 0;
> -}
> -
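As a worked example of the conversion in dlb_eventdev_timeout_ticks() above: with a 2 GHz timer, rte_get_timer_hz() returns 2000000000, cycles_per_ns evaluates to 2, and a 1000 ns timeout maps to 2000 ticks. Because the quotient is assigned to a uint64_t, a 2.6 GHz timer also yields cycles_per_ns == 2, so for non-integral GHz frequencies the returned tick count slightly undershoots the requested time.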
> -void
> -dlb_entry_points_init(struct rte_eventdev *dev)
> -{
> -	struct dlb_eventdev *dlb;
> -
> -	static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
> -		.dev_infos_get    = dlb_eventdev_info_get,
> -		.dev_configure    = dlb_eventdev_configure,
> -		.dev_start        = dlb_eventdev_start,
> -		.dev_stop         = dlb_eventdev_stop,
> -		.dev_close        = dlb_eventdev_close,
> -		.queue_def_conf   = dlb_eventdev_queue_default_conf_get,
> -		.port_def_conf    = dlb_eventdev_port_default_conf_get,
> -		.queue_setup      = dlb_eventdev_queue_setup,
> -		.queue_release    = dlb_eventdev_queue_release,
> -		.port_setup       = dlb_eventdev_port_setup,
> -		.port_release     = dlb_eventdev_port_release,
> -		.port_link        = dlb_eventdev_port_link,
> -		.port_unlink      = dlb_eventdev_port_unlink,
> -		.port_unlinks_in_progress =
> -				    dlb_eventdev_port_unlinks_in_progress,
> -		.timeout_ticks    = dlb_eventdev_timeout_ticks,
> -		.dump             = dlb_eventdev_dump,
> -		.xstats_get       = dlb_eventdev_xstats_get,
> -		.xstats_get_names = dlb_eventdev_xstats_get_names,
> -		.xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
> -		.xstats_reset	    = dlb_eventdev_xstats_reset,
> -		.dev_selftest     = test_dlb_eventdev,
> -	};
> -
> -	/* Expose PMD's eventdev interface */
> -	dev->dev_ops = &dlb_eventdev_entry_ops;
> -
> -	dev->enqueue = dlb_event_enqueue;
> -	dev->enqueue_burst = dlb_event_enqueue_burst;
> -	dev->enqueue_new_burst = dlb_event_enqueue_new_burst;
> -	dev->enqueue_forward_burst = dlb_event_enqueue_forward_burst;
> -	dev->dequeue = dlb_event_dequeue;
> -	dev->dequeue_burst = dlb_event_dequeue_burst;
> -
> -	dlb = dev->data->dev_private;
> -
> -	if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE) {
> -		dev->dequeue = dlb_event_dequeue_sparse;
> -		dev->dequeue_burst = dlb_event_dequeue_burst_sparse;
> -	}
> -}
> -
> -int
> -dlb_primary_eventdev_probe(struct rte_eventdev *dev,
> -			   const char *name,
> -			   struct dlb_devargs *dlb_args)
> -{
> -	struct dlb_eventdev *dlb;
> -	int err, i;
> -
> -	dlb = dev->data->dev_private;
> -
> -	dlb->event_dev = dev; /* backlink */
> -
> -	evdev_dlb_default_info.driver_name = name;
> -
> -	dlb->max_num_events_override = dlb_args->max_num_events;
> -	dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
> -	dlb->defer_sched = dlb_args->defer_sched;
> -	dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
> -
> -	/* Open the interface.
> -	 * For vdev mode, this means open the dlb kernel module.
> -	 */
> -	err = dlb_iface_open(&dlb->qm_instance, name);
> -	if (err < 0) {
> -		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
> -			    err);
> -		return err;
> -	}
> -
> -	err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
> -	if (err < 0) {
> -		DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
> -			    err);
> -		return err;
> -	}
> -
> -	err = dlb_hw_query_resources(dlb);
> -	if (err) {
> -		DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
> -		return err;
> -	}
> -
> -	err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
> -	if (err < 0) {
> -		DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
> -		return err;
> -	}
> -
> -	/* Complete xtstats runtime initialization */
> -	err = dlb_xstats_init(dlb);
> -	if (err) {
> -		DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
> -		return err;
> -	}
> -
> -	/* Initialize each port's token pop mode */
> -	for (i = 0; i < DLB_MAX_NUM_PORTS; i++)
> -		dlb->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
> -
> -	rte_spinlock_init(&dlb->qm_instance.resource_lock);
> -
> -	dlb_iface_low_level_io_init(dlb);
> -
> -	dlb_entry_points_init(dev);
> -
> -	return 0;
> -}
> -
> -int
> -dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
> -			     const char *name)
> -{
> -	struct dlb_eventdev *dlb;
> -	int err;
> -
> -	dlb = dev->data->dev_private;
> -
> -	evdev_dlb_default_info.driver_name = name;
> -
> -	err = dlb_iface_open(&dlb->qm_instance, name);
> -	if (err < 0) {
> -		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
> -			    err);
> -		return err;
> -	}
> -
> -	err = dlb_hw_query_resources(dlb);
> -	if (err) {
> -		DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
> -		return err;
> -	}
> -
> -	dlb_iface_low_level_io_init(dlb);
> -
> -	dlb_entry_points_init(dev);
> -
> -	return 0;
> -}
> -
> -int
> -dlb_parse_params(const char *params,
> -		 const char *name,
> -		 struct dlb_devargs *dlb_args)
> -{
> -	int ret = 0;
> -	static const char * const args[] = { NUMA_NODE_ARG,
> -					     DLB_MAX_NUM_EVENTS,
> -					     DLB_NUM_DIR_CREDITS,
> -					     DEV_ID_ARG,
> -					     DLB_DEFER_SCHED_ARG,
> -					     DLB_NUM_ATM_INFLIGHTS_ARG,
> -					     NULL };
> -
> -	if (params && params[0] != '\0') {
> -		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
> -
> -		if (kvlist == NULL) {
> -			DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
> -				     name);
> -		} else {
> -			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
> -						     set_numa_node,
> -						     &dlb_args->socket_id);
> -			if (ret != 0) {
> -				DLB_LOG_ERR("%s: Error parsing numa node parameter",
> -					    name);
> -				rte_kvargs_free(kvlist);
> -				return ret;
> -			}
> -
> -			ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
> -						 set_max_num_events,
> -						 &dlb_args->max_num_events);
> -			if (ret != 0) {
> -				DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
> -					    name);
> -				rte_kvargs_free(kvlist);
> -				return ret;
> -			}
> -
> -			ret = rte_kvargs_process(kvlist,
> -					DLB_NUM_DIR_CREDITS,
> -					set_num_dir_credits,
> -					&dlb_args->num_dir_credits_override);
> -			if (ret != 0) {
> -				DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
> -					    name);
> -				rte_kvargs_free(kvlist);
> -				return ret;
> -			}
> -
> -			ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
> -						 set_dev_id,
> -						 &dlb_args->dev_id);
> -			if (ret != 0) {
> -				DLB_LOG_ERR("%s: Error parsing dev_id parameter",
> -					    name);
> -				rte_kvargs_free(kvlist);
> -				return ret;
> -			}
> -
> -			ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
> -						 set_defer_sched,
> -						 &dlb_args->defer_sched);
> -			if (ret != 0) {
> -				DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
> -					    name);
> -				rte_kvargs_free(kvlist);
> -				return ret;
> -			}
> -
> -			ret = rte_kvargs_process(kvlist,
> -						 DLB_NUM_ATM_INFLIGHTS_ARG,
> -						 set_num_atm_inflights,
> -						 &dlb_args->num_atm_inflights);
> -			if (ret != 0) {
> -				DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
> -					    name);
> -				rte_kvargs_free(kvlist);
> -				return ret;
> -			}
> -
> -			rte_kvargs_free(kvlist);
> -		}
> -	}
> -	return ret;
> -}
> -RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);
> diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
> deleted file mode 100644
> index 44f958f5d..000000000
> --- a/drivers/event/dlb/dlb_iface.c
> +++ /dev/null
> @@ -1,79 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#include <stdint.h>
> -
> -#include "dlb_priv.h"
> -
> -/* DLB PMD Internal interface function pointers.
> - * If VDEV (bifurcated PMD),  these will resolve to functions that issue ioctls
> - * serviced by DLB kernel module.
> - * If PCI (PF PMD),  these will be implemented locally in user mode.
> - */
> -
> -void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);
> -
> -int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);
> -
> -void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);
> -
> -int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
> -				    uint8_t *revision);
> -
> -int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
> -				   struct dlb_get_num_resources_args *rsrcs);
> -
> -int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,
> -				     struct dlb_create_sched_domain_args *args);
> -
> -int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
> -					struct dlb_create_ldb_pool_args *cfg);
> -
> -int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
> -					struct dlb_create_dir_pool_args *cfg);
> -
> -int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle,
> -				  struct dlb_create_dir_queue_args *cfg);
> -
> -int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
> -				  struct dlb_create_ldb_queue_args *cfg);
> -
> -int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle,
> -				 struct dlb_create_ldb_port_args *cfg,
> -				 enum dlb_cq_poll_modes poll_mode);
> -
> -int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
> -				 struct dlb_create_dir_port_args *cfg,
> -				 enum dlb_cq_poll_modes poll_mode);
> -
> -int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
> -			 struct dlb_map_qid_args *cfg);
> -
> -int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
> -			   struct dlb_unmap_qid_args *cfg);
> -
> -int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
> -				    struct dlb_start_domain_args *cfg);
> -
> -int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
> -				     struct dlb_pending_port_unmaps_args *args);
> -
> -int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
> -				  enum dlb_cq_poll_modes *mode);
> -
> -int (*dlb_iface_get_sn_allocation)(struct dlb_hw_dev *handle,
> -				   struct dlb_get_sn_allocation_args *args);
> -
> -int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,
> -				   struct dlb_set_sn_allocation_args *args);
> -
> -int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,
> -				  struct dlb_get_sn_occupancy_args *args);
> -
> -int (*dlb_iface_get_ldb_queue_depth)(struct dlb_hw_dev *handle,
> -				     struct dlb_get_ldb_queue_depth_args *args);
> -
> -int (*dlb_iface_get_dir_queue_depth)(struct dlb_hw_dev *handle,
> -				     struct dlb_get_dir_queue_depth_args *args);
> -
> diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
> deleted file mode 100644
> index 9f61135ce..000000000
> --- a/drivers/event/dlb/dlb_iface.h
> +++ /dev/null
> @@ -1,82 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef _DLB_IFACE_H
> -#define _DLB_IFACE_H
> -
> -/* DLB PMD Internal interface function pointers.
> - * If VDEV (bifurcated PMD), these will resolve to functions that issue ioctls
> - * serviced by DLB kernel module.
> - * If PCI (PF PMD), these will be implemented locally in user mode.
> - */
> -
> -extern void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);
> -
> -extern int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);
> -
> -extern void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);
> -
> -extern int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
> -					   uint8_t *revision);
> -
> -extern int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
> -				   struct dlb_get_num_resources_args *rsrcs);
> -
> -extern int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,
> -				     struct dlb_create_sched_domain_args *args);
> -
> -extern int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
> -					struct dlb_create_ldb_pool_args *cfg);
> -
> -extern int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
> -					struct dlb_create_dir_pool_args *cfg);
> -
> -extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
> -				  struct dlb_create_ldb_queue_args *cfg);
> -
> -extern int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle,
> -				  struct dlb_create_dir_queue_args *cfg);
> -
> -extern int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle,
> -					struct dlb_create_ldb_port_args *cfg,
> -					enum dlb_cq_poll_modes poll_mode);
> -
> -extern int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
> -					struct dlb_create_dir_port_args *cfg,
> -					enum dlb_cq_poll_modes poll_mode);
> -
> -extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
> -				  struct dlb_create_ldb_queue_args *cfg);
> -
> -extern int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
> -			 struct dlb_map_qid_args *cfg);
> -
> -extern int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
> -				  struct dlb_unmap_qid_args *cfg);
> -
> -extern int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
> -				    struct dlb_start_domain_args *cfg);
> -
> -extern int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
> -				struct dlb_pending_port_unmaps_args *args);
> -
> -extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
> -					 enum dlb_cq_poll_modes *mode);
> -
> -extern int (*dlb_iface_get_sn_allocation)(struct dlb_hw_dev *handle,
> -				  struct dlb_get_sn_allocation_args *args);
> -
> -extern int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,
> -				  struct dlb_set_sn_allocation_args *args);
> -
> -extern int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,
> -				  struct dlb_get_sn_occupancy_args *args);
> -
> -extern int (*dlb_iface_get_ldb_queue_depth)(struct dlb_hw_dev *handle,
> -				    struct dlb_get_ldb_queue_depth_args *args);
> -
> -extern int (*dlb_iface_get_dir_queue_depth)(struct dlb_hw_dev *handle,
> -				    struct dlb_get_dir_queue_depth_args *args);
> -
> -#endif /* _DLB_IFACE_H */
> diff --git a/drivers/event/dlb/dlb_inline_fns.h b/drivers/event/dlb/dlb_inline_fns.h
> deleted file mode 100644
> index aae94dc3c..000000000
> --- a/drivers/event/dlb/dlb_inline_fns.h
> +++ /dev/null
> @@ -1,36 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef _DLB_INLINE_FNS_H_
> -#define _DLB_INLINE_FNS_H_
> -
> -#include "rte_memcpy.h"
> -#include "rte_io.h"
> -
> -/* Inline functions required in more than one source file. */
> -
> -static inline struct dlb_eventdev *
> -dlb_pmd_priv(const struct rte_eventdev *eventdev)
> -{
> -	return eventdev->data->dev_private;
> -}
> -
> -static inline void
> -dlb_movntdq_single(void *dest, void *src)
> -{
> -	long long *_src  = (long long *)src;
> -	__m128i src_data0 = (__m128i){_src[0], _src[1]};
> -
> -	_mm_stream_si128(dest, src_data0);
> -}
> -
> -static inline void
> -dlb_movdir64b(void *dest, void *src)
> -{
> -	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
> -		:
> -		: "a" (dest), "d" (src));
> -}
> -
> -#endif /* _DLB_INLINE_FNS_H_ */
> diff --git a/drivers/event/dlb/dlb_log.h b/drivers/event/dlb/dlb_log.h
> deleted file mode 100644
> index c69c9e5be..000000000
> --- a/drivers/event/dlb/dlb_log.h
> +++ /dev/null
> @@ -1,25 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef _DLB_EVDEV_LOG_H_
> -#define _DLB_EVDEV_LOG_H_
> -
> -extern int eventdev_dlb_log_level;
> -
> -/* Dynamic logging */
> -#define DLB_LOG_IMPL(level, fmt, args...) \
> -	rte_log(RTE_LOG_ ## level, eventdev_dlb_log_level, "%s" fmt "\n", \
> -		__func__, ##args)
> -
> -#define DLB_LOG_INFO(fmt, args...) \
> -	DLB_LOG_IMPL(INFO, fmt, ## args)
> -
> -#define DLB_LOG_ERR(fmt, args...) \
> -	DLB_LOG_IMPL(ERR, fmt, ## args)
> -
> -/* remove debug logs at compile time unless actually debugging */
> -#define DLB_LOG_DBG(fmt, args...) \
> -	RTE_LOG_DP(DEBUG, PMD, fmt, ## args)
> -
> -#endif /* _DLB_EVDEV_LOG_H_ */
> diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
> deleted file mode 100644
> index 272e17482..000000000
> --- a/drivers/event/dlb/dlb_priv.h
> +++ /dev/null
> @@ -1,513 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef _DLB_PRIV_H_
> -#define _DLB_PRIV_H_
> -
> -#include <emmintrin.h>
> -#include <stdbool.h>
> -
> -#include <rte_bus_pci.h>
> -#include <rte_eventdev.h>
> -#include <eventdev_pmd.h>
> -#include <eventdev_pmd_pci.h>
> -#include <rte_pci.h>
> -
> -#include "dlb_user.h"
> -#include "dlb_log.h"
> -#include "rte_pmd_dlb.h"
> -
> -#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
> -#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
> -#else
> -#define DLB_INC_STAT(_stat, _incr_val)
> -#endif
> -
> -#define EVDEV_DLB_NAME_PMD_STR "dlb_event"
> -
> -/* command line arg strings */
> -#define NUMA_NODE_ARG "numa_node"
> -#define DLB_MAX_NUM_EVENTS "max_num_events"
> -#define DLB_NUM_DIR_CREDITS "num_dir_credits"
> -#define DEV_ID_ARG "dev_id"
> -#define DLB_DEFER_SCHED_ARG "defer_sched"
> -#define DLB_NUM_ATM_INFLIGHTS_ARG "atm_inflights"
> -
> -/* Begin HW related defines and structs */
> -
> -#define DLB_MAX_NUM_DOMAINS 32
> -#define DLB_MAX_NUM_VFS 16
> -#define DLB_MAX_NUM_LDB_QUEUES 128
> -#define DLB_MAX_NUM_LDB_PORTS 64
> -#define DLB_MAX_NUM_DIR_PORTS 128
> -#define DLB_MAX_NUM_DIR_QUEUES 128
> -#define DLB_MAX_NUM_FLOWS (64 * 1024)
> -#define DLB_MAX_NUM_LDB_CREDITS 16384
> -#define DLB_MAX_NUM_DIR_CREDITS 4096
> -#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
> -#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
> -#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
> -#define DLB_MAX_NUM_ATM_INFLIGHTS 2048
> -#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
> -#define DLB_QID_PRIORITIES 8
> -#define DLB_MAX_DEVICE_PATH 32
> -#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1
> -#define DLB_NUM_SN_GROUPS 4
> -#define DLB_MAX_LDB_SN_ALLOC 1024
> -/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
> -#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
> -#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048
> -
> -/* 5120 total hist list entries and 64 total ldb ports, which
> - * makes for 5120/64 == 80 hist list entries per port. However, CQ
> - * depth must be a power of 2 and must also be >= HIST LIST entries.
> - * As a result we just limit the maximum dequeue depth to 64.
> - */
> -#define DLB_MIN_LDB_CQ_DEPTH 1
> -#define DLB_MIN_DIR_CQ_DEPTH 8
> -#define DLB_MIN_HARDWARE_CQ_DEPTH 8
> -#define DLB_MAX_CQ_DEPTH 64
> -#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
> -	DLB_MAX_CQ_DEPTH
> -
> -/* Static per queue/port provisioning values */
> -#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16
> -
> -#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)
> -
> -#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
> -
> -#define DLB_NUM_QES_PER_CACHE_LINE 4
> -
> -#define DLB_MAX_ENQUEUE_DEPTH 64
> -#define DLB_MIN_ENQUEUE_DEPTH 4
> -
> -#define DLB_NAME_SIZE 64
> -
> -/* Use the upper 3 bits of the event priority to select the DLB priority */
> -#define EV_TO_DLB_PRIO(x) ((x) >> 5)
> -#define DLB_TO_EV_PRIO(x) ((x) << 5)
> -
> -enum dlb_hw_port_type {
> -	DLB_LDB,
> -	DLB_DIR,
> -
> -	/* NUM_DLB_PORT_TYPES must be last */
> -	NUM_DLB_PORT_TYPES
> -};
> -
> -#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB)
> -
> -/* Do not change - must match hardware! */
> -enum dlb_hw_sched_type {
> -	DLB_SCHED_ATOMIC = 0,
> -	DLB_SCHED_UNORDERED,
> -	DLB_SCHED_ORDERED,
> -	DLB_SCHED_DIRECTED,
> -
> -	/* DLB_NUM_HW_SCHED_TYPES must be last */
> -	DLB_NUM_HW_SCHED_TYPES
> -};
> -
> -struct dlb_devargs {
> -	int socket_id;
> -	int max_num_events;
> -	int num_dir_credits_override;
> -	int dev_id;
> -	int defer_sched;
> -	int num_atm_inflights;
> -};
> -
> -struct dlb_hw_rsrcs {
> -	int32_t nb_events_limit;
> -	uint32_t num_queues;		/* Total queues (ldb + dir) */
> -	uint32_t num_ldb_queues;	/* Number of available ldb queues */
> -	uint32_t num_ldb_ports;         /* Number of load balanced ports */
> -	uint32_t num_dir_ports;         /* Number of directed ports */
> -	uint32_t num_ldb_credits;       /* Number of load balanced credits */
> -	uint32_t num_dir_credits;       /* Number of directed credits */
> -	uint32_t reorder_window_size;   /* Size of reorder window */
> -};
> -
> -struct dlb_hw_resource_info {
> -	/**> Max resources that can be provided */
> -	struct dlb_hw_rsrcs hw_rsrc_max;
> -	int num_sched_domains;
> -	uint32_t socket_id;
> -	/**> EAL flags passed to this DLB instance, allowing the application to
> -	 * identify the pmd backend indicating hardware or software.
> -	 */
> -	const char *eal_flags;
> -};
> -
> -/* hw-specific format - do not change */
> -
> -struct dlb_event_type {
> -	uint8_t major:4;
> -	uint8_t unused:4;
> -	uint8_t sub;
> -};
> -
> -union dlb_opaque_data {
> -	uint16_t opaque_data;
> -	struct dlb_event_type event_type;
> -};
> -
> -struct dlb_msg_info {
> -	uint8_t qid;
> -	uint8_t sched_type:2;
> -	uint8_t priority:3;
> -	uint8_t msg_type:3;
> -};
> -
> -#define DLB_NEW_CMD_BYTE 0x08
> -#define DLB_FWD_CMD_BYTE 0x0A
> -#define DLB_COMP_CMD_BYTE 0x02
> -#define DLB_NOOP_CMD_BYTE 0x00
> -#define DLB_POP_CMD_BYTE 0x01
> -
> -/* hw-specific format - do not change */
> -struct dlb_enqueue_qe {
> -	uint64_t data;
> -	/* Word 3 */
> -	union dlb_opaque_data u;
> -	uint8_t qid;
> -	uint8_t sched_type:2;
> -	uint8_t priority:3;
> -	uint8_t msg_type:3;
> -	/* Word 4 */
> -	uint16_t lock_id;
> -	uint8_t meas_lat:1;
> -	uint8_t rsvd1:2;
> -	uint8_t no_dec:1;
> -	uint8_t cmp_id:4;
> -	union {
> -		uint8_t cmd_byte;
> -		struct {
> -			uint8_t cq_token:1;
> -			uint8_t qe_comp:1;
> -			uint8_t qe_frag:1;
> -			uint8_t qe_valid:1;
> -			uint8_t int_arm:1;
> -			uint8_t error:1;
> -			uint8_t rsvd:2;
> -		};
> -	};
> -};
> -
> -/* hw-specific format - do not change */
> -struct dlb_cq_pop_qe {
> -	uint64_t data;
> -	union dlb_opaque_data u;
> -	uint8_t qid;
> -	uint8_t sched_type:2;
> -	uint8_t priority:3;
> -	uint8_t msg_type:3;
> -	uint16_t tokens:10;
> -	uint16_t rsvd2:6;
> -	uint8_t meas_lat:1;
> -	uint8_t rsvd1:2;
> -	uint8_t no_dec:1;
> -	uint8_t cmp_id:4;
> -	union {
> -		uint8_t cmd_byte;
> -		struct {
> -			uint8_t cq_token:1;
> -			uint8_t qe_comp:1;
> -			uint8_t qe_frag:1;
> -			uint8_t qe_valid:1;
> -			uint8_t int_arm:1;
> -			uint8_t error:1;
> -			uint8_t rsvd:2;
> -		};
> -	};
> -};
> -
> -/* hw-specific format - do not change */
> -struct dlb_dequeue_qe {
> -	uint64_t data;
> -	union dlb_opaque_data u;
> -	uint8_t qid;
> -	uint8_t sched_type:2;
> -	uint8_t priority:3;
> -	uint8_t msg_type:3;
> -	uint16_t pp_id:10;
> -	uint16_t rsvd0:6;
> -	uint8_t debug;
> -	uint8_t cq_gen:1;
> -	uint8_t qid_depth:1;
> -	uint8_t rsvd1:3;
> -	uint8_t error:1;
> -	uint8_t rsvd2:2;
> -};
> -
> -enum dlb_port_state {
> -	PORT_CLOSED,
> -	PORT_STARTED,
> -	PORT_STOPPED
> -};
> -
> -enum dlb_configuration_state {
> -	/* The resource has not been configured */
> -	DLB_NOT_CONFIGURED,
> -	/* The resource was configured, but the device was stopped */
> -	DLB_PREV_CONFIGURED,
> -	/* The resource is currently configured */
> -	DLB_CONFIGURED
> -};
> -
> -struct dlb_port {
> -	uint32_t id;
> -	bool is_directed;
> -	bool gen_bit;
> -	uint16_t dir_credits;
> -	uint32_t dequeue_depth;
> -	enum dlb_token_pop_mode token_pop_mode;
> -	int pp_mmio_base;
> -	uint16_t cached_ldb_credits;
> -	uint16_t ldb_pushcount_at_credit_expiry;
> -	uint16_t ldb_credits;
> -	uint16_t cached_dir_credits;
> -	uint16_t dir_pushcount_at_credit_expiry;
> -	bool int_armed;
> -	bool use_rsvd_token_scheme;
> -	uint8_t cq_rsvd_token_deficit;
> -	uint16_t owed_tokens;
> -	int16_t issued_releases;
> -	int16_t token_pop_thresh;
> -	int cq_depth;
> -	uint16_t cq_idx;
> -	uint16_t cq_idx_unmasked;
> -	uint16_t cq_depth_mask;
> -	uint16_t gen_bit_shift;
> -	enum dlb_port_state state;
> -	enum dlb_configuration_state config_state;
> -	int num_mapped_qids;
> -	uint8_t *qid_mappings;
> -	struct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
> -	struct dlb_cq_pop_qe *consume_qe;
> -	struct dlb_eventdev *dlb; /* back ptr */
> -	struct dlb_eventdev_port *ev_port; /* back ptr */
> -};
> -
> -/* Per-process per-port mmio and memory pointers */
> -struct process_local_port_data {
> -	uint64_t *pp_addr;
> -	uint16_t *ldb_popcount;
> -	uint16_t *dir_popcount;
> -	struct dlb_dequeue_qe *cq_base;
> -	const struct rte_memzone *mz;
> -	bool mmaped;
> -};
> -
> -struct dlb_config {
> -	int configured;
> -	int reserved;
> -	uint32_t ldb_credit_pool_id;
> -	uint32_t dir_credit_pool_id;
> -	uint32_t num_ldb_credits;
> -	uint32_t num_dir_credits;
> -	struct dlb_create_sched_domain_args resources;
> -};
> -
> -struct dlb_hw_dev {
> -	struct dlb_config cfg;
> -	struct dlb_hw_resource_info info;
> -	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */
> -	int device_id;
> -	uint32_t domain_id;
> -	int domain_id_valid;
> -	rte_spinlock_t resource_lock; /* for MP support */
> -} __rte_cache_aligned;
> -
> -/* End HW related defines and structs */
> -
> -/* Begin DLB PMD Eventdev related defines and structs */
> -
> -#define DLB_MAX_NUM_QUEUES \
> -	(DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES)
> -
> -#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS)
> -#define DLB_MAX_INPUT_QUEUE_DEPTH 256
> -
> -/** Structure to hold the queue to port link establishment attributes */
> -
> -struct dlb_event_queue_link {
> -	uint8_t queue_id;
> -	uint8_t priority;
> -	bool mapped;
> -	bool valid;
> -};
> -
> -struct dlb_traffic_stats {
> -	uint64_t rx_ok;
> -	uint64_t rx_drop;
> -	uint64_t rx_interrupt_wait;
> -	uint64_t rx_umonitor_umwait;
> -	uint64_t tx_ok;
> -	uint64_t total_polls;
> -	uint64_t zero_polls;
> -	uint64_t tx_nospc_ldb_hw_credits;
> -	uint64_t tx_nospc_dir_hw_credits;
> -	uint64_t tx_nospc_inflight_max;
> -	uint64_t tx_nospc_new_event_limit;
> -	uint64_t tx_nospc_inflight_credits;
> -};
> -
> -struct dlb_port_stats {
> -	struct dlb_traffic_stats traffic;
> -	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
> -	uint64_t tx_implicit_rel;
> -	uint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
> -	uint64_t tx_invalid;
> -	uint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
> -	uint64_t rx_sched_invalid;
> -	uint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */
> -};
> -
> -struct dlb_eventdev_port {
> -	struct dlb_port qm_port; /* hw specific data structure */
> -	struct rte_event_port_conf conf; /* user-supplied configuration */
> -	uint16_t inflight_credits; /* num credits this port has right now */
> -	uint16_t credit_update_quanta;
> -	struct dlb_eventdev *dlb; /* backlink optimization */
> -	struct dlb_port_stats stats __rte_cache_aligned;
> -	struct dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
> -	int num_links;
> -	uint32_t id;
> -	/* num releases yet to be completed on this port.
> -	 * Only applies to load-balanced ports.
> -	 */
> -	uint16_t outstanding_releases;
> -	uint16_t inflight_max; /* app requested max inflights for this port */
> -	/* setup_done is set when the event port is setup */
> -	bool setup_done;
> -	/* enq_configured is set when the qm port is created */
> -	bool enq_configured;
> -	uint8_t implicit_release; /* release events before dequeueing */
> -} __rte_cache_aligned;
> -
> -struct dlb_queue {
> -	uint32_t num_qid_inflights; /* User config */
> -	uint32_t num_atm_inflights; /* User config */
> -	enum dlb_configuration_state config_state;
> -	int sched_type; /* LB queue only */
> -	uint32_t id;
> -	bool is_directed;
> -};
> -
> -struct dlb_eventdev_queue {
> -	struct dlb_queue qm_queue;
> -	struct rte_event_queue_conf conf; /* User config */
> -	uint64_t enq_ok;
> -	uint32_t id;
> -	bool setup_done;
> -	uint8_t num_links;
> -};
> -
> -enum dlb_run_state {
> -	DLB_RUN_STATE_STOPPED = 0,
> -	DLB_RUN_STATE_STOPPING,
> -	DLB_RUN_STATE_STARTING,
> -	DLB_RUN_STATE_STARTED
> -};
> -
> -struct dlb_eventdev {
> -	struct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS];
> -	struct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES];
> -	uint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
> -	uint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
> -
> -	/* store num stats and offset of the stats for each queue */
> -	uint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES];
> -	uint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES];
> -
> -	/* store num stats and offset of the stats for each port */
> -	uint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS];
> -	uint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS];
> -	struct dlb_get_num_resources_args hw_rsrc_query_results;
> -	uint32_t xstats_count_mode_queue;
> -	struct dlb_hw_dev qm_instance; /* strictly hw related */
> -	uint64_t global_dequeue_wait_ticks;
> -	struct dlb_xstats_entry *xstats;
> -	struct rte_eventdev *event_dev; /* backlink to dev */
> -	uint32_t xstats_count_mode_port;
> -	uint32_t xstats_count_mode_dev;
> -	uint32_t xstats_count;
> -	uint32_t inflights; /* use __atomic builtins to access */
> -	uint32_t new_event_limit;
> -	int max_num_events_override;
> -	int num_dir_credits_override;
> -	volatile enum dlb_run_state run_state;
> -	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
> -	uint16_t num_dir_credits;
> -	uint16_t num_ldb_credits;
> -	uint16_t num_queues; /* total queues */
> -	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
> -	uint16_t num_ports; /* total num of evdev ports requested */
> -	uint16_t num_ldb_ports; /* total num of ldb ports requested */
> -	uint16_t num_dir_ports; /* total num of dir ports requested */
> -	bool is_vdev;
> -	bool umwait_allowed;
> -	bool global_dequeue_wait; /* Not using per dequeue wait if true */
> -	bool defer_sched;
> -	unsigned int num_atm_inflights_per_queue;
> -	enum dlb_cq_poll_modes poll_mode;
> -	uint8_t revision;
> -	bool configured;
> -};
> -
> -/* End Eventdev related defines and structs */
> -
> -/* externs */
> -
> -extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];
> -
> -/* Forwards for non-inlined functions */
> -
> -void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);
> -
> -int dlb_xstats_init(struct dlb_eventdev *dlb);
> -
> -void dlb_xstats_uninit(struct dlb_eventdev *dlb);
> -
> -int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
> -			    enum rte_event_dev_xstats_mode mode,
> -			    uint8_t queue_port_id, const unsigned int ids[],
> -			    uint64_t values[], unsigned int n);
> -
> -int dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
> -				  enum rte_event_dev_xstats_mode mode,
> -				  uint8_t queue_port_id,
> -				  struct rte_event_dev_xstats_name *xstat_names,
> -				  unsigned int *ids, unsigned int size);
> -
> -uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
> -					 const char *name, unsigned int *id);
> -
> -int dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
> -			      enum rte_event_dev_xstats_mode mode,
> -			      int16_t queue_port_id,
> -			      const uint32_t ids[],
> -			      uint32_t nb_ids);
> -
> -int test_dlb_eventdev(void);
> -
> -int dlb_primary_eventdev_probe(struct rte_eventdev *dev,
> -			       const char *name,
> -			       struct dlb_devargs *dlb_args);
> -
> -int dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
> -				 const char *name);
> -
> -uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb,
> -			     struct dlb_eventdev_queue *queue);
> -
> -int dlb_parse_params(const char *params,
> -		     const char *name,
> -		     struct dlb_devargs *dlb_args);
> -
> -void dlb_entry_points_init(struct rte_eventdev *dev);
> -
> -#endif	/* _DLB_PRIV_H_ */
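
For context on the xstats_count_per_qid[]/xstats_offset_for_qid[] pairs above (and the per-port equivalents): they describe slices of one flat xstats array, which is just a prefix sum over the per-queue counts. A minimal self-contained sketch; the 8-queue layout and the counts are made up, and the real values would be filled in by dlb_xstats_init():

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 8 /* hypothetical; the PMD sizes these by DLB_MAX_NUM_QUEUES */

int main(void)
{
	/* Hypothetical per-queue stat counts. */
	uint16_t count_per_qid[NUM_QUEUES] = {3, 3, 3, 3, 5, 5, 5, 5};
	uint16_t offset_for_qid[NUM_QUEUES];
	uint16_t offset = 0;
	int i;

	/* Prefix sum: each queue's stats start where the previous one ended. */
	for (i = 0; i < NUM_QUEUES; i++) {
		offset_for_qid[i] = offset;
		offset += count_per_qid[i];
	}

	for (i = 0; i < NUM_QUEUES; i++)
		printf("queue %d: %u stats at flat offset %u\n",
		       i, count_per_qid[i], offset_for_qid[i]);

	return 0;
}
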
> diff --git a/drivers/event/dlb/dlb_selftest.c b/drivers/event/dlb/dlb_selftest.c
> deleted file mode 100644
> index 8ab00ba87..000000000
> --- a/drivers/event/dlb/dlb_selftest.c
> +++ /dev/null
> @@ -1,1544 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#include <stdio.h>
> -#include <string.h>
> -#include <stdint.h>
> -#include <errno.h>
> -#include <unistd.h>
> -#include <sys/queue.h>
> -
> -#include <rte_memory.h>
> -#include <rte_memzone.h>
> -#include <rte_launch.h>
> -#include <rte_eal.h>
> -#include <rte_lcore.h>
> -#include <rte_debug.h>
> -#include <rte_cycles.h>
> -#include <rte_eventdev.h>
> -#include <rte_mempool.h>
> -#include <rte_mbuf.h>
> -
> -#include "dlb_priv.h"
> -#include "rte_pmd_dlb.h"
> -
> -#define MAX_PORTS 32
> -#define MAX_QIDS 32
> -#define DEFAULT_NUM_SEQ_NUMS 32
> -
> -static struct rte_mempool *eventdev_func_mempool;
> -static int evdev;
> -
> -struct test {
> -	struct rte_mempool *mbuf_pool;
> -	int nb_qids;
> -};
> -
> -/* initialization and config */
> -static inline int
> -init(struct test *t, int nb_queues, int nb_ports)
> -{
> -	struct rte_event_dev_config config = {0};
> -	struct rte_event_dev_info info;
> -	int ret;
> -
> -	memset(t, 0, sizeof(*t));
> -
> -	t->mbuf_pool = eventdev_func_mempool;
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		return -1;
> -	}
> -
> -	config.nb_event_queues = nb_queues;
> -	config.nb_event_ports = nb_ports;
> -	config.nb_event_queue_flows = info.max_event_queue_flows;
> -	config.nb_events_limit = info.max_num_events;
> -	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
> -	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
> -	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
> -	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> -
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0)
> -		printf("%d: Error configuring device\n", __LINE__);
> -
> -	return ret;
> -}
> -
> -static inline int
> -create_ports(int num_ports)
> -{
> -	int i;
> -
> -	if (num_ports > MAX_PORTS)
> -		return -1;
> -
> -	for (i = 0; i < num_ports; i++) {
> -		struct rte_event_port_conf conf;
> -
> -		if (rte_event_port_default_conf_get(evdev, i, &conf)) {
> -			printf("%d: Error querying default port conf\n",
> -			       __LINE__);
> -			return -1;
> -		}
> -
> -		if (rte_event_port_setup(evdev, i, &conf) < 0) {
> -			printf("%d: Error setting up port %d\n", __LINE__, i);
> -			return -1;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -static inline int
> -create_lb_qids(struct test *t, int num_qids, uint32_t flags)
> -{
> -	int i;
> -
> -	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
> -		struct rte_event_queue_conf conf;
> -
> -		if (rte_event_queue_default_conf_get(evdev, i, &conf)) {
> -			printf("%d: Error querying default queue conf\n",
> -			       __LINE__);
> -			return -1;
> -		}
> -
> -		conf.schedule_type = flags;
> -
> -		if (conf.schedule_type == RTE_SCHED_TYPE_PARALLEL)
> -			conf.nb_atomic_order_sequences = 0;
> -		else
> -			conf.nb_atomic_order_sequences = DEFAULT_NUM_SEQ_NUMS;
> -
> -		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
> -			printf("%d: error creating qid %d\n", __LINE__, i);
> -			return -1;
> -		}
> -	}
> -
> -	t->nb_qids += num_qids;
> -	if (t->nb_qids > MAX_QIDS)
> -		return -1;
> -
> -	return 0;
> -}
> -
> -static inline int
> -create_atomic_qids(struct test *t, int num_qids)
> -{
> -	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
> -}
> -
> -/* destruction */
> -static inline int
> -cleanup(void)
> -{
> -	rte_event_dev_stop(evdev);
> -	return rte_event_dev_close(evdev);
> -};
> -
> -static inline int
> -enqueue_timeout(uint8_t port_id, struct rte_event *ev, uint64_t tmo_us)
> -{
> -	const uint64_t start = rte_get_timer_cycles();
> -	const uint64_t ticks = (tmo_us * rte_get_timer_hz()) / 1E6;
> -
> -	while ((rte_get_timer_cycles() - start) < ticks) {
> -		if (rte_event_enqueue_burst(evdev, port_id, ev, 1) == 1)
> -			return 0;
> -
> -		if (rte_errno != -ENOSPC)
> -			return -1;
> -	}
> -
> -	return -1;
> -}
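
The conversion in enqueue_timeout() above, (tmo_us * rte_get_timer_hz()) / 1E6, is the usual microseconds-to-timer-cycles translation for a busy-wait deadline. A self-contained sketch with an assumed 2 GHz timer; us_to_cycles() is an illustrative helper, not a DPDK API:

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: convert a microsecond timeout to timer cycles. */
static uint64_t
us_to_cycles(uint64_t tmo_us, uint64_t timer_hz)
{
	return (tmo_us * timer_hz) / 1000000ULL;
}

int main(void)
{
	uint64_t hz = 2000000000ULL; /* assume a 2.0 GHz timer for illustration */

	printf("1000 us -> %llu cycles\n",
	       (unsigned long long)us_to_cycles(1000, hz));
	return 0;
}
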
> -
> -static void
> -flush(uint8_t id __rte_unused, struct rte_event event, void *arg __rte_unused)
> -{
> -	rte_pktmbuf_free(event.mbuf);
> -}
> -
> -static int
> -test_stop_flush(struct test *t) /* test to check we can properly flush events */
> -{
> -	struct rte_event ev;
> -	uint32_t dequeue_depth;
> -	unsigned int i, count;
> -	uint8_t queue_id;
> -
> -	ev.op = RTE_EVENT_OP_NEW;
> -
> -	if (init(t, 2, 1) < 0 ||
> -	    create_ports(1) < 0 ||
> -	    create_atomic_qids(t, 2) < 0) {
> -		printf("%d: Error initializing device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	if (rte_event_port_link(evdev, 0, NULL, NULL, 0) != 2) {
> -		printf("%d: Error linking queues to the port\n", __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: Error with start call\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Unlink queue 1 so the PMD's stop callback has to cleanup an unlinked
> -	 * queue.
> -	 */
> -	queue_id = 1;
> -
> -	if (rte_event_port_unlink(evdev, 0, &queue_id, 1) != 1) {
> -		printf("%d: Error unlinking queue 1 from port\n", __LINE__);
> -		goto err;
> -	}
> -
> -	if (t->mbuf_pool)
> -		count = rte_mempool_avail_count(t->mbuf_pool);
> -	else {
> -		printf("%d: mbuf_pool is NULL\n", __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_port_attr_get(evdev,
> -				    0,
> -				    RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
> -				    &dequeue_depth)) {
> -		printf("%d: Error retrieving dequeue depth\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Send QEs to queue 0 */
> -	for (i = 0; i < dequeue_depth + 1; i++) {
> -		ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
> -		ev.queue_id = 0;
> -		ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> -
> -		if (enqueue_timeout(0, &ev, 1000)) {
> -			printf("%d: Error enqueuing events\n", __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	/* Send QEs to queue 1 */
> -	for (i = 0; i < dequeue_depth + 1; i++) {
> -		ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
> -		ev.queue_id = 1;
> -		ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> -
> -		if (enqueue_timeout(0, &ev, 1000)) {
> -			printf("%d: Error enqueuing events\n", __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	/* Now the DLB is scheduling events from the port to the IQ, and at
> -	 * least one event should be remaining in each queue.
> -	 */
> -
> -	if (rte_event_dev_stop_flush_callback_register(evdev, flush, NULL)) {
> -		printf("%d: Error installing the flush callback\n", __LINE__);
> -		goto err;
> -	}
> -
> -	cleanup();
> -
> -	if (count != rte_mempool_avail_count(t->mbuf_pool)) {
> -		printf("%d: Error executing the flush callback\n", __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
> -		printf("%d: Error uninstalling the flush callback\n", __LINE__);
> -		goto err;
> -	}
> -
> -	return 0;
> -err:
> -	cleanup();
> -	return -1;
> -}
> -
> -static int
> -test_single_link(void)
> -{
> -	struct rte_event_dev_config config = {0};
> -	struct rte_event_queue_conf queue_conf;
> -	struct rte_event_port_conf port_conf;
> -	struct rte_event_dev_info info;
> -	uint8_t queue_id;
> -	int ret;
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		return -1;
> -	}
> -
> -	config.nb_event_queues = 2;
> -	config.nb_event_ports = 2;
> -	config.nb_single_link_event_port_queues = 1;
> -	config.nb_event_queue_flows = info.max_event_queue_flows;
> -	config.nb_events_limit = info.max_num_events;
> -	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
> -	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
> -	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
> -	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> -
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	/* Create a directed port */
> -	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
> -		printf("%d: Error querying default port conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
> -
> -	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
> -		printf("%d: port 0 setup expected to succeed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Attempt to create another directed port */
> -	if (rte_event_port_setup(evdev, 1, &port_conf) == 0) {
> -		printf("%d: port 1 setup expected to fail\n", __LINE__);
> -		goto err;
> -	}
> -
> -	port_conf.event_port_cfg = 0;
> -
> -	/* Create a load-balanced port */
> -	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
> -		printf("%d: port 1 setup expected to succeed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Create a directed queue */
> -	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
> -		printf("%d: Error querying default queue conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
> -
> -	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
> -		printf("%d: queue 0 setup expected to succeed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Attempt to create another directed queue */
> -	if (rte_event_queue_setup(evdev, 1, &queue_conf) == 0) {
> -		printf("%d: queue 1 setup expected to fail\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Create a load-balanced queue */
> -	queue_conf.event_queue_cfg = 0;
> -
> -	if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
> -		printf("%d: queue 1 setup expected to succeed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Attempt to link directed and load-balanced resources */
> -	queue_id = 1;
> -	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) == 1) {
> -		printf("%d: port 0 link expected to fail\n", __LINE__);
> -		goto err;
> -	}
> -
> -	queue_id = 0;
> -	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) == 1) {
> -		printf("%d: port 1 link expected to fail\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Link ports to queues */
> -	queue_id = 0;
> -	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
> -		printf("%d: port 0 link expected to succeed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	queue_id = 1;
> -	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
> -		printf("%d: port 1 link expected to succeed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	return rte_event_dev_close(evdev);
> -
> -err:
> -	rte_event_dev_close(evdev);
> -	return -1;
> -}
> -
> -#define NUM_LDB_PORTS 64
> -#define NUM_LDB_QUEUES 128
> -
> -static int
> -test_info_get(void)
> -{
> -	struct rte_event_dev_config config = {0};
> -	struct rte_event_dev_info info;
> -	int ret;
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		return -1;
> -	}
> -
> -	if (info.max_event_ports != NUM_LDB_PORTS) {
> -		printf("%d: Got %u ports, expected %u\n",
> -		       __LINE__, info.max_event_ports, NUM_LDB_PORTS);
> -		goto err;
> -	}
> -
> -	if (info.max_event_queues != NUM_LDB_QUEUES) {
> -		printf("%d: Got %u queues, expected %u\n",
> -		       __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
> -		goto err;
> -	}
> -
> -	config.nb_event_ports = info.max_event_ports;
> -	config.nb_event_queues = NUM_LDB_QUEUES + info.max_event_ports / 2;
> -	config.nb_single_link_event_port_queues = info.max_event_ports / 2;
> -	config.nb_event_queue_flows = info.max_event_queue_flows;
> -	config.nb_events_limit = info.max_num_events;
> -	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
> -	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
> -	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
> -	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> -
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* The DLB PMD only reports load-balanced ports and queues in its
> -	 * info_get function. Confirm that these values don't include the
> -	 * directed port or queue counts.
> -	 */
> -
> -	if (info.max_event_ports != NUM_LDB_PORTS) {
> -		printf("%d: Got %u ports, expected %u\n",
> -		       __LINE__, info.max_event_ports, NUM_LDB_PORTS);
> -		goto err;
> -	}
> -
> -	if (info.max_event_queues != NUM_LDB_QUEUES) {
> -		printf("%d: Got %u queues, expected %u\n",
> -		       __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
> -		goto err;
> -	}
> -
> -	ret = rte_event_dev_close(evdev);
> -	if (ret) {
> -		printf("rte_event_dev_close err %d\n", ret);
> -		goto err;
> -	}
> -
> -	return 0;
> -
> -err:
> -	rte_event_dev_close(evdev);
> -	return -1;
> -}
> -
> -static int
> -test_reconfiguration_link(void)
> -{
> -	struct rte_event_dev_config config = {0};
> -	struct rte_event_queue_conf queue_conf;
> -	struct rte_event_port_conf port_conf;
> -	struct rte_event_dev_info info;
> -	uint8_t queue_id;
> -	int ret, i;
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		return -1;
> -	}
> -
> -	config.nb_event_queues = 2;
> -	config.nb_event_ports = 2;
> -	config.nb_single_link_event_port_queues = 0;
> -	config.nb_event_queue_flows = info.max_event_queue_flows;
> -	config.nb_events_limit = info.max_num_events;
> -	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
> -	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
> -	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
> -	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> -
> -	/* Configure the device with 2 LDB ports and 2 LDB queues */
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	/* Configure the ports and queues */
> -	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
> -		printf("%d: Error querying default port conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	for (i = 0; i < 2; i++) {
> -		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
> -			printf("%d: port %d setup expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -	}
> -
> -	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
> -		printf("%d: Error querying default queue conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	for (i = 0; i < 2; i++) {
> -		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
> -			printf("%d: queue %d setup expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -	}
> -
> -	/* Link P0->Q0 and P1->Q1 */
> -	for (i = 0; i < 2; i++) {
> -		queue_id = i;
> -
> -		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
> -			printf("%d: port %d link expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Stop the device */
> -	rte_event_dev_stop(evdev);
> -
> -	/* Reconfigure device */
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error re-configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	/* Configure P1 and Q1, leave P0 and Q0 to be configured by the PMD. */
> -	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
> -		printf("%d: port 1 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
> -		printf("%d: queue 1 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Link P0->Q0 and Q1 */
> -	for (i = 0; i < 2; i++) {
> -		queue_id = i;
> -
> -		if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
> -			printf("%d: P0->Q%d link expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -	}
> -
> -	/* Link P1->Q0 and Q1 */
> -	for (i = 0; i < 2; i++) {
> -		queue_id = i;
> -
> -		if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
> -			printf("%d: P1->Q%d link expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Stop the device */
> -	rte_event_dev_stop(evdev);
> -
> -	/* Configure device with 2 DIR ports and 2 DIR queues */
> -	config.nb_single_link_event_port_queues = 2;
> -
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	/* Configure the ports and queues */
> -	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
> -
> -	for (i = 0; i < 2; i++) {
> -		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
> -			printf("%d: port %d setup expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -	}
> -
> -	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
> -
> -	for (i = 0; i < 2; i++) {
> -		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
> -			printf("%d: queue %d setup expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -	}
> -
> -	/* Link P0->Q0 and P1->Q1 */
> -	for (i = 0; i < 2; i++) {
> -		queue_id = i;
> -
> -		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
> -			printf("%d: port %d link expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Stop the device */
> -	rte_event_dev_stop(evdev);
> -
> -	/* Reconfigure device */
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error re-configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	/* Configure P1 and Q0, leave P0 and Q1 to be configured by the PMD. */
> -	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
> -		printf("%d: port 1 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
> -		printf("%d: queue 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Link P0->Q1 */
> -	queue_id = 1;
> -
> -	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
> -		printf("%d: P0->Q%d link expected to succeed\n",
> -		       __LINE__, i);
> -		goto err;
> -	}
> -
> -	/* Link P1->Q0 */
> -	queue_id = 0;
> -
> -	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
> -		printf("%d: P1->Q%d link expected to succeed\n",
> -		       __LINE__, i);
> -		goto err;
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	rte_event_dev_stop(evdev);
> -
> -	config.nb_event_queues = 5;
> -	config.nb_event_ports = 5;
> -	config.nb_single_link_event_port_queues = 1;
> -
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error re-configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	for (i = 0; i < config.nb_event_queues - 1; i++) {
> -		port_conf.event_port_cfg = 0;
> -		queue_conf.event_queue_cfg = 0;
> -
> -		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
> -			printf("%d: port %d setup expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -
> -		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
> -			printf("%d: queue %d setup expected to succeed\n",
> -			       __LINE__, i);
> -			goto err;
> -		}
> -
> -		queue_id = i;
> -
> -		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
> -			printf("%d: P%d->Q%d link expected to succeed\n",
> -			       __LINE__, i, i);
> -			goto err;
> -		}
> -	}
> -
> -	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
> -	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
> -
> -	if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
> -		printf("%d: port %d setup expected to succeed\n",
> -		       __LINE__, i);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
> -		printf("%d: queue %d setup expected to succeed\n",
> -		       __LINE__, i);
> -		goto err;
> -	}
> -
> -	queue_id = i;
> -
> -	if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
> -		printf("%d: P%d->Q%d link expected to succeed\n",
> -		       __LINE__, i, i);
> -		goto err;
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Stop the device */
> -	rte_event_dev_stop(evdev);
> -
> -	config.nb_event_ports += 1;
> -
> -	/* Reconfigure device with 1 more load-balanced port */
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error re-configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	port_conf.event_port_cfg = 0;
> -
> -	/* Configure the new port */
> -	if (rte_event_port_setup(evdev, config.nb_event_ports - 1,
> -				 &port_conf) < 0) {
> -		printf("%d: port 1 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	cleanup();
> -	return 0;
> -
> -err:
> -	cleanup();
> -	return -1;
> -}
> -
> -static int
> -test_load_balanced_traffic(void)
> -{
> -	uint64_t timeout;
> -	struct rte_event_dev_config config = {0};
> -	struct rte_event_queue_conf queue_conf;
> -	struct rte_event_port_conf port_conf;
> -	struct rte_event_dev_info info;
> -	struct rte_event ev;
> -	uint8_t queue_id;
> -	int ret;
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		return -1;
> -	}
> -
> -	config.nb_event_queues = 1;
> -	config.nb_event_ports = 1;
> -	config.nb_single_link_event_port_queues = 0;
> -	config.nb_event_queue_flows = info.max_event_queue_flows;
> -	config.nb_events_limit = info.max_num_events;
> -	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
> -	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
> -	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
> -	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> -
> -	/* Configure the device with 1 LDB port and queue */
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	/* Configure the ports and queues */
> -	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
> -		printf("%d: Error querying default port conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
> -		printf("%d: port 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
> -		printf("%d: Error querying default queue conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
> -		printf("%d: queue 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Link P0->Q0 */
> -	queue_id = 0;
> -
> -	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
> -		printf("%d: port 0 link expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Enqueue 1 NEW event */
> -	ev.op = RTE_EVENT_OP_NEW;
> -	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> -	ev.queue_id = 0;
> -	ev.priority = 0;
> -	ev.u64 = 0;
> -
> -	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -		printf("%d: NEW enqueue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Dequeue and enqueue 1 FORWARD event */
> -	timeout = 0xFFFFFFFFF;
> -	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -		printf("%d: event dequeue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	ev.op = RTE_EVENT_OP_FORWARD;
> -
> -	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -		printf("%d: NEW enqueue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Dequeue and enqueue 1 RELEASE operation */
> -	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -		printf("%d: event dequeue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	ev.op = RTE_EVENT_OP_RELEASE;
> -
> -	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -		printf("%d: NEW enqueue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	cleanup();
> -	return 0;
> -
> -err:
> -	cleanup();
> -	return -1;
> -}
> -
> -static int
> -test_directed_traffic(void)
> -{
> -	uint64_t timeout;
> -	struct rte_event_dev_config config = {0};
> -	struct rte_event_queue_conf queue_conf;
> -	struct rte_event_port_conf port_conf;
> -	struct rte_event_dev_info info;
> -	struct rte_event ev;
> -	uint8_t queue_id;
> -	int ret;
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		return -1;
> -	}
> -
> -	config.nb_event_queues = 1;
> -	config.nb_event_ports = 1;
> -	config.nb_single_link_event_port_queues = 1;
> -	config.nb_event_queue_flows = info.max_event_queue_flows;
> -	config.nb_events_limit = info.max_num_events;
> -	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
> -	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
> -	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
> -	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> -
> -	/* Configure the device with 1 DIR port and queue */
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	/* Configure the ports and queues */
> -	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
> -		printf("%d: Error querying default port conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	port_conf.event_port_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
> -
> -	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
> -		printf("%d: port 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
> -		printf("%d: Error querying default queue conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
> -
> -	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
> -		printf("%d: queue 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Link P0->Q0 */
> -	queue_id = 0;
> -
> -	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
> -		printf("%d: port 0 link expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Enqueue 1 NEW event */
> -	ev.op = RTE_EVENT_OP_NEW;
> -	ev.queue_id = 0;
> -	ev.priority = 0;
> -	ev.u64 = 0;
> -
> -	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -		printf("%d: NEW enqueue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Dequeue and enqueue 1 FORWARD event */
> -	timeout = 0xFFFFFFFFF;
> -	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -		printf("%d: event dequeue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (ev.queue_id != 0) {
> -		printf("%d: invalid dequeued event queue ID (%d)\n",
> -		       __LINE__, ev.queue_id);
> -		goto err;
> -	}
> -
> -	ev.op = RTE_EVENT_OP_FORWARD;
> -
> -	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -		printf("%d: NEW enqueue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Dequeue and enqueue 1 RELEASE operation */
> -	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -		printf("%d: event dequeue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	ev.op = RTE_EVENT_OP_RELEASE;
> -
> -	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -		printf("%d: NEW enqueue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	cleanup();
> -	return 0;
> -
> -err:
> -	cleanup();
> -	return -1;
> -}
> -
> -static int
> -test_deferred_sched(void)
> -{
> -	uint64_t timeout;
> -	struct rte_event_dev_config config = {0};
> -	struct rte_event_queue_conf queue_conf;
> -	struct rte_event_port_conf port_conf;
> -	struct rte_event_dev_info info;
> -	const int num_events = 128;
> -	struct rte_event ev;
> -	uint8_t queue_id;
> -	int ret, i;
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		return -1;
> -	}
> -
> -	config.nb_event_queues = 1;
> -	config.nb_event_ports = 2;
> -	config.nb_single_link_event_port_queues = 0;
> -	config.nb_event_queue_flows = info.max_event_queue_flows;
> -	config.nb_events_limit = info.max_num_events;
> -	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
> -	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
> -	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
> -	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> -
> -	/* Configure the device with 2 LDB ports and 1 queue */
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DEFERRED_POP);
> -	if (ret < 0) {
> -		printf("%d: Error setting deferred scheduling\n", __LINE__);
> -		goto err;
> -	}
> -
> -	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 1, DEFERRED_POP);
> -	if (ret < 0) {
> -		printf("%d: Error setting deferred scheduling\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Configure the ports and queues */
> -	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
> -		printf("%d: Error querying default port conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	port_conf.dequeue_depth = 1;
> -
> -	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
> -		printf("%d: port 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
> -		printf("%d: port 1 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
> -		printf("%d: Error querying default queue conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
> -	queue_conf.nb_atomic_order_sequences = 0;
> -
> -	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
> -		printf("%d: queue 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Link P0->Q0 and P1->Q0 */
> -	queue_id = 0;
> -
> -	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
> -		printf("%d: port 0 link expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
> -		printf("%d: port 1 link expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Enqueue 128 NEW events */
> -	ev.op = RTE_EVENT_OP_NEW;
> -	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
> -	ev.queue_id = 0;
> -	ev.priority = 0;
> -	ev.u64 = 0;
> -
> -	for (i = 0; i < num_events; i++) {
> -		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -			printf("%d: NEW enqueue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	/* Dequeue two events from port 0 (dequeue_depth * 2 due to the
> -	 * reserved token scheme)
> -	 */
> -	timeout = 0xFFFFFFFFF;
> -	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -		printf("%d: event dequeue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -		printf("%d: event dequeue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Dequeue (and release) all other events from port 1. Deferred
> -	 * scheduling ensures no other events are scheduled to port 0 without a
> -	 * subsequent rte_event_dequeue_burst() call.
> -	 */
> -	for (i = 0; i < num_events - 2; i++) {
> -		if (rte_event_dequeue_burst(evdev, 1, &ev, 1, timeout) != 1) {
> -			printf("%d: event dequeue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -
> -		ev.op = RTE_EVENT_OP_RELEASE;
> -
> -		if (rte_event_enqueue_burst(evdev, 1, &ev, 1) != 1) {
> -			printf("%d: RELEASE enqueue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	cleanup();
> -	return 0;
> -
> -err:
> -	cleanup();
> -	return -1;
> -}
> -
> -static int
> -test_delayed_pop(void)
> -{
> -	uint64_t timeout;
> -	struct rte_event_dev_config config = {0};
> -	struct rte_event_queue_conf queue_conf;
> -	struct rte_event_port_conf port_conf;
> -	struct rte_event_dev_info info;
> -	int ret, i, num_events;
> -	struct rte_event ev;
> -	uint8_t queue_id;
> -
> -	if (rte_event_dev_info_get(evdev, &info)) {
> -		printf("%d: Error querying device info\n", __LINE__);
> -		return -1;
> -	}
> -
> -	config.nb_event_queues = 1;
> -	config.nb_event_ports = 1;
> -	config.nb_single_link_event_port_queues = 0;
> -	config.nb_event_queue_flows = info.max_event_queue_flows;
> -	config.nb_events_limit = info.max_num_events;
> -	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
> -	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
> -	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
> -	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> -
> -	/* Configure the device with 1 LDB port and queue */
> -	ret = rte_event_dev_configure(evdev, &config);
> -	if (ret < 0) {
> -		printf("%d: Error configuring device\n", __LINE__);
> -		return -1;
> -	}
> -
> -	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DELAYED_POP);
> -	if (ret < 0) {
> -		printf("%d: Error setting deferred scheduling\n", __LINE__);
> -		goto err;
> -	}
> -
> -	/* Configure the ports and queues */
> -	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
> -		printf("%d: Error querying default port conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	port_conf.dequeue_depth = 16;
> -	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
> -
> -	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
> -		printf("%d: port 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
> -		printf("%d: Error querying default queue conf\n", __LINE__);
> -		goto err;
> -	}
> -
> -	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
> -		printf("%d: queue 0 setup expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Link P0->Q0 */
> -	queue_id = 0;
> -
> -	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
> -		printf("%d: port 0 link expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	/* Start the device */
> -	if (rte_event_dev_start(evdev) < 0) {
> -		printf("%d: device start failed\n", __LINE__);
> -		goto err;
> -	}
> -
> -	num_events = 2 * port_conf.dequeue_depth;
> -
> -	/* Enqueue 2 * dequeue_depth NEW events. Due to the PMD's reserved
> -	 * token scheme, the port will initially behave as though its
> -	 * dequeue_depth is twice the requested size.
> -	 */
> -	ev.op = RTE_EVENT_OP_NEW;
> -	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
> -	ev.queue_id = 0;
> -	ev.priority = 0;
> -	ev.u64 = 0;
> -
> -	for (i = 0; i < num_events; i++) {
> -		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -			printf("%d: NEW enqueue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	/* Flush these events out of the CQ */
> -	timeout = 0xFFFFFFFFF;
> -
> -	for (i = 0; i < num_events; i++) {
> -		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -			printf("%d: event dequeue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	ev.op = RTE_EVENT_OP_RELEASE;
> -
> -	for (i = 0; i < num_events; i++) {
> -		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -			printf("%d: RELEASE enqueue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	/* Enqueue 2 * dequeue_depth NEW events again */
> -	ev.op = RTE_EVENT_OP_NEW;
> -	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> -	ev.queue_id = 0;
> -	ev.priority = 0;
> -	ev.u64 = 0;
> -
> -	for (i = 0; i < num_events; i++) {
> -		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -			printf("%d: NEW enqueue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	/* Dequeue dequeue_depth events but only release dequeue_depth - 1.
> -	 * Delayed pop won't perform the pop and no more events will be
> -	 * scheduled.
> -	 */
> -	for (i = 0; i < port_conf.dequeue_depth; i++) {
> -		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -			printf("%d: event dequeue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	ev.op = RTE_EVENT_OP_RELEASE;
> -
> -	for (i = 0; i < port_conf.dequeue_depth - 1; i++) {
> -		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -			printf("%d: RELEASE enqueue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	timeout = 0x10000;
> -
> -	ret = rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout);
> -	if (ret != 0) {
> -		printf("%d: event dequeue expected to fail (ret = %d)\n",
> -		       __LINE__, ret);
> -		goto err;
> -	}
> -
> -	/* Release one more event. This will trigger the token pop, and
> -	 * another batch of events will be scheduled to the device.
> -	 */
> -	ev.op = RTE_EVENT_OP_RELEASE;
> -
> -	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
> -		printf("%d: RELEASE enqueue expected to succeed\n",
> -		       __LINE__);
> -		goto err;
> -	}
> -
> -	timeout = 0xFFFFFFFFF;
> -
> -	for (i = 0; i < port_conf.dequeue_depth; i++) {
> -		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
> -			printf("%d: event dequeue expected to succeed\n",
> -			       __LINE__);
> -			goto err;
> -		}
> -	}
> -
> -	cleanup();
> -	return 0;
> -
> -err:
> -	cleanup();
> -	return -1;
> -}
> -
> -static int
> -do_selftest(void)
> -{
> -	struct test t;
> -	int ret;
> -
> -	/* Only create mbuf pool once, reuse for each test run */
> -	if (!eventdev_func_mempool) {
> -		eventdev_func_mempool =
> -			rte_pktmbuf_pool_create("EVENTDEV_DLB_SA_MBUF_POOL",
> -						(1 << 12), /* 4k buffers */
> -						32 /*MBUF_CACHE_SIZE*/,
> -						0,
> -						512, /* use very small mbufs */
> -						rte_socket_id());
> -		if (!eventdev_func_mempool) {
> -			printf("ERROR creating mempool\n");
> -			goto test_fail;
> -		}
> -	}
> -	t.mbuf_pool = eventdev_func_mempool;
> -
> -	printf("*** Running Stop Flush test...\n");
> -	ret = test_stop_flush(&t);
> -	if (ret != 0) {
> -		printf("ERROR - Stop Flush test FAILED.\n");
> -		return ret;
> -	}
> -
> -	printf("*** Running Single Link test...\n");
> -	ret = test_single_link();
> -	if (ret != 0) {
> -		printf("ERROR - Single Link test FAILED.\n");
> -
> -		goto test_fail;
> -	}
> -
> -	printf("*** Running Info Get test...\n");
> -	ret = test_info_get();
> -	if (ret != 0) {
> -		printf("ERROR - Info Get test FAILED.\n");
> -		return ret;
> -	}
> -
> -	printf("*** Running Reconfiguration Link test...\n");
> -	ret = test_reconfiguration_link();
> -	if (ret != 0) {
> -		printf("ERROR - Reconfiguration Link test FAILED.\n");
> -
> -		goto test_fail;
> -	}
> -
> -	printf("*** Running Load-Balanced Traffic test...\n");
> -	ret = test_load_balanced_traffic();
> -	if (ret != 0) {
> -		printf("ERROR - Load-Balanced Traffic test FAILED.\n");
> -
> -		goto test_fail;
> -	}
> -
> -	printf("*** Running Directed Traffic test...\n");
> -	ret = test_directed_traffic();
> -	if (ret != 0) {
> -		printf("ERROR - Directed Traffic test FAILED.\n");
> -
> -		goto test_fail;
> -	}
> -
> -	printf("*** Running Deferred Scheduling test...\n");
> -	ret = test_deferred_sched();
> -	if (ret != 0) {
> -		printf("ERROR - Deferred Scheduling test FAILED.\n");
> -
> -		goto test_fail;
> -	}
> -
> -	printf("*** Running Delayed Pop test...\n");
> -	ret = test_delayed_pop();
> -	if (ret != 0) {
> -		printf("ERROR - Delayed Pop test FAILED.\n");
> -
> -		goto test_fail;
> -	}
> -
> -	return 0;
> -
> -test_fail:
> -	return -1;
> -}
> -
> -int
> -test_dlb_eventdev(void)
> -{
> -	const char *dlb_eventdev_name = "dlb_event";
> -	uint8_t num_evdevs = rte_event_dev_count();
> -	int i, ret = 0;
> -	int found = 0, skipped = 0, passed = 0, failed = 0;
> -	struct rte_event_dev_info info;
> -
> -	for (i = 0; found + skipped < num_evdevs && i < RTE_EVENT_MAX_DEVS;
> -	     i++) {
> -		ret = rte_event_dev_info_get(i, &info);
> -		if (ret < 0)
> -			continue;
> -
> -		/* skip non-dlb event devices */
> -		if (strncmp(info.driver_name, dlb_eventdev_name,
> -			    sizeof(*info.driver_name)) != 0) {
> -			skipped++;
> -			continue;
> -		}
> -
> -		evdev = rte_event_dev_get_dev_id(info.driver_name);
> -		if (evdev < 0) {
> -			printf("Could not get dev_id for eventdev with name %s, i=%d\n",
> -			       info.driver_name, i);
> -			skipped++;
> -			continue;
> -		}
> -		found++;
> -		printf("Running selftest on eventdev %s\n", info.driver_name);
> -		ret = do_selftest();
> -		if (ret == 0) {
> -			passed++;
> -			printf("Selftest passed for eventdev %s\n",
> -			       info.driver_name);
> -		} else {
> -			failed++;
> -			printf("Selftest failed for eventdev %s, err=%d\n",
> -			       info.driver_name, ret);
> -		}
> -	}
> -
> -	printf("Ran selftest on %d eventdevs, %d skipped, %d passed, %d failed\n",
> -	       found, skipped, passed, failed);
> -	return ret;
> -}
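
A side note on the driver-name check above: strncmp() is bounded by sizeof(*info.driver_name), which is the size of a single char, so only the first byte is compared. A strlen()-bounded prefix match, sketched standalone below with the same "dlb_event" name, compares the whole string:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *driver_name = "dlb_event"; /* stand-in for info.driver_name */
	const char *wanted = "dlb_event";

	if (strncmp(driver_name, wanted, strlen(wanted)) == 0)
		printf("driver %s matches\n", driver_name);
	else
		printf("driver %s skipped\n", driver_name);

	return 0;
}
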
> diff --git a/drivers/event/dlb/dlb_user.h b/drivers/event/dlb/dlb_user.h
> deleted file mode 100644
> index 2d9582b2b..000000000
> --- a/drivers/event/dlb/dlb_user.h
> +++ /dev/null
> @@ -1,814 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_USER_H
> -#define __DLB_USER_H
> -
> -#include <linux/types.h>
> -
> -#define DLB_MAX_NAME_LEN 64
> -
> -enum dlb_error {
> -	DLB_ST_SUCCESS = 0,
> -	DLB_ST_NAME_EXISTS,
> -	DLB_ST_DOMAIN_UNAVAILABLE,
> -	DLB_ST_LDB_PORTS_UNAVAILABLE,
> -	DLB_ST_DIR_PORTS_UNAVAILABLE,
> -	DLB_ST_LDB_QUEUES_UNAVAILABLE,
> -	DLB_ST_LDB_CREDITS_UNAVAILABLE,
> -	DLB_ST_DIR_CREDITS_UNAVAILABLE,
> -	DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE,
> -	DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE,
> -	DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE,
> -	DLB_ST_INVALID_DOMAIN_ID,
> -	DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION,
> -	DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE,
> -	DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE,
> -	DLB_ST_INVALID_LDB_CREDIT_POOL_ID,
> -	DLB_ST_INVALID_DIR_CREDIT_POOL_ID,
> -	DLB_ST_INVALID_POP_COUNT_VIRT_ADDR,
> -	DLB_ST_INVALID_LDB_QUEUE_ID,
> -	DLB_ST_INVALID_CQ_DEPTH,
> -	DLB_ST_INVALID_CQ_VIRT_ADDR,
> -	DLB_ST_INVALID_PORT_ID,
> -	DLB_ST_INVALID_QID,
> -	DLB_ST_INVALID_PRIORITY,
> -	DLB_ST_NO_QID_SLOTS_AVAILABLE,
> -	DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE,
> -	DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE,
> -	DLB_ST_INVALID_DIR_QUEUE_ID,
> -	DLB_ST_DIR_QUEUES_UNAVAILABLE,
> -	DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK,
> -	DLB_ST_INVALID_LDB_CREDIT_QUANTUM,
> -	DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK,
> -	DLB_ST_INVALID_DIR_CREDIT_QUANTUM,
> -	DLB_ST_DOMAIN_NOT_CONFIGURED,
> -	DLB_ST_PID_ALREADY_ATTACHED,
> -	DLB_ST_PID_NOT_ATTACHED,
> -	DLB_ST_INTERNAL_ERROR,
> -	DLB_ST_DOMAIN_IN_USE,
> -	DLB_ST_IOMMU_MAPPING_ERROR,
> -	DLB_ST_FAIL_TO_PIN_MEMORY_PAGE,
> -	DLB_ST_UNABLE_TO_PIN_POPCOUNT_PAGES,
> -	DLB_ST_UNABLE_TO_PIN_CQ_PAGES,
> -	DLB_ST_DISCONTIGUOUS_CQ_MEMORY,
> -	DLB_ST_DISCONTIGUOUS_POP_COUNT_MEMORY,
> -	DLB_ST_DOMAIN_STARTED,
> -	DLB_ST_LARGE_POOL_NOT_SPECIFIED,
> -	DLB_ST_SMALL_POOL_NOT_SPECIFIED,
> -	DLB_ST_NEITHER_POOL_SPECIFIED,
> -	DLB_ST_DOMAIN_NOT_STARTED,
> -	DLB_ST_INVALID_MEASUREMENT_DURATION,
> -	DLB_ST_INVALID_PERF_METRIC_GROUP_ID,
> -	DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES,
> -	DLB_ST_DOMAIN_RESET_FAILED,
> -	DLB_ST_MBOX_ERROR,
> -	DLB_ST_INVALID_HIST_LIST_DEPTH,
> -	DLB_ST_NO_MEMORY,
> -};
> -
> -static const char dlb_error_strings[][128] = {
> -	"DLB_ST_SUCCESS",
> -	"DLB_ST_NAME_EXISTS",
> -	"DLB_ST_DOMAIN_UNAVAILABLE",
> -	"DLB_ST_LDB_PORTS_UNAVAILABLE",
> -	"DLB_ST_DIR_PORTS_UNAVAILABLE",
> -	"DLB_ST_LDB_QUEUES_UNAVAILABLE",
> -	"DLB_ST_LDB_CREDITS_UNAVAILABLE",
> -	"DLB_ST_DIR_CREDITS_UNAVAILABLE",
> -	"DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE",
> -	"DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE",
> -	"DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE",
> -	"DLB_ST_INVALID_DOMAIN_ID",
> -	"DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION",
> -	"DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE",
> -	"DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE",
> -	"DLB_ST_INVALID_LDB_CREDIT_POOL_ID",
> -	"DLB_ST_INVALID_DIR_CREDIT_POOL_ID",
> -	"DLB_ST_INVALID_POP_COUNT_VIRT_ADDR",
> -	"DLB_ST_INVALID_LDB_QUEUE_ID",
> -	"DLB_ST_INVALID_CQ_DEPTH",
> -	"DLB_ST_INVALID_CQ_VIRT_ADDR",
> -	"DLB_ST_INVALID_PORT_ID",
> -	"DLB_ST_INVALID_QID",
> -	"DLB_ST_INVALID_PRIORITY",
> -	"DLB_ST_NO_QID_SLOTS_AVAILABLE",
> -	"DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE",
> -	"DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE",
> -	"DLB_ST_INVALID_DIR_QUEUE_ID",
> -	"DLB_ST_DIR_QUEUES_UNAVAILABLE",
> -	"DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK",
> -	"DLB_ST_INVALID_LDB_CREDIT_QUANTUM",
> -	"DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK",
> -	"DLB_ST_INVALID_DIR_CREDIT_QUANTUM",
> -	"DLB_ST_DOMAIN_NOT_CONFIGURED",
> -	"DLB_ST_PID_ALREADY_ATTACHED",
> -	"DLB_ST_PID_NOT_ATTACHED",
> -	"DLB_ST_INTERNAL_ERROR",
> -	"DLB_ST_DOMAIN_IN_USE",
> -	"DLB_ST_IOMMU_MAPPING_ERROR",
> -	"DLB_ST_FAIL_TO_PIN_MEMORY_PAGE",
> -	"DLB_ST_UNABLE_TO_PIN_POPCOUNT_PAGES",
> -	"DLB_ST_UNABLE_TO_PIN_CQ_PAGES",
> -	"DLB_ST_DISCONTIGUOUS_CQ_MEMORY",
> -	"DLB_ST_DISCONTIGUOUS_POP_COUNT_MEMORY",
> -	"DLB_ST_DOMAIN_STARTED",
> -	"DLB_ST_LARGE_POOL_NOT_SPECIFIED",
> -	"DLB_ST_SMALL_POOL_NOT_SPECIFIED",
> -	"DLB_ST_NEITHER_POOL_SPECIFIED",
> -	"DLB_ST_DOMAIN_NOT_STARTED",
> -	"DLB_ST_INVALID_MEASUREMENT_DURATION",
> -	"DLB_ST_INVALID_PERF_METRIC_GROUP_ID",
> -	"DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES",
> -	"DLB_ST_DOMAIN_RESET_FAILED",
> -	"DLB_ST_MBOX_ERROR",
> -	"DLB_ST_INVALID_HIST_LIST_DEPTH",
> -	"DLB_ST_NO_MEMORY",
> -};
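
The strings above line up one-to-one with enum dlb_error, so a status code can be turned into text with a bounds-checked table lookup. A trimmed self-contained sketch; only the first three entries are reproduced and err_str() is an illustrative helper:

#include <stdio.h>

static const char err_strings[][128] = {
	"DLB_ST_SUCCESS",
	"DLB_ST_NAME_EXISTS",
	"DLB_ST_DOMAIN_UNAVAILABLE",
};

static const char *
err_str(unsigned int status)
{
	unsigned int n = sizeof(err_strings) / sizeof(err_strings[0]);

	return (status < n) ? err_strings[status] : "unknown DLB status";
}

int main(void)
{
	printf("%s\n", err_str(1));  /* DLB_ST_NAME_EXISTS */
	printf("%s\n", err_str(99)); /* out of range -> unknown DLB status */
	return 0;
}
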
> -
> -struct dlb_cmd_response {
> -	__u32 status; /* Interpret using enum dlb_error */
> -	__u32 id;
> -};
> -
> -/******************************/
> -/* 'dlb' commands	      */
> -/******************************/
> -
> -#define DLB_DEVICE_VERSION(x) (((x) >> 8) & 0xFF)
> -#define DLB_DEVICE_REVISION(x) ((x) & 0xFF)
> -
> -enum dlb_revisions {
> -	DLB_REV_A0 = 0,
> -	DLB_REV_A1 = 1,
> -	DLB_REV_A2 = 2,
> -	DLB_REV_A3 = 3,
> -	DLB_REV_B0 = 4,
> -};
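
The DLB_DEVICE_VERSION()/DLB_DEVICE_REVISION() macros above unpack a value carrying the version in bits 15:8 and the revision in bits 7:0. A small standalone check; the packed value is made up, and revision 4 corresponds to DLB_REV_B0 in the enum:

#include <stdint.h>
#include <stdio.h>

#define DLB_DEVICE_VERSION(x) (((x) >> 8) & 0xFF)
#define DLB_DEVICE_REVISION(x) ((x) & 0xFF)

int main(void)
{
	uint32_t packed = (1u << 8) | 4u; /* hypothetical: version 1, revision 4 */

	printf("version %u, revision %u\n",
	       (unsigned)DLB_DEVICE_VERSION(packed),
	       (unsigned)DLB_DEVICE_REVISION(packed));
	return 0;
}
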
> -
> -/*
> - * DLB_CMD_CREATE_SCHED_DOMAIN: Create a DLB scheduling domain and reserve the
> - *	resources (queues, ports, etc.) that it contains.
> - *
> - * Input parameters:
> - * - num_ldb_queues: Number of load-balanced queues.
> - * - num_ldb_ports: Number of load-balanced ports.
> - * - num_dir_ports: Number of directed ports. A directed port has one directed
> - *	queue, so no num_dir_queues argument is necessary.
> - * - num_atomic_inflights: This specifies the amount of temporary atomic QE
> - *	storage for the domain. This storage is divided among the domain's
> - *	load-balanced queues that are configured for atomic scheduling.
> - * - num_hist_list_entries: Amount of history list storage. This is divided
> - *	among the domain's CQs.
> - * - num_ldb_credits: Amount of load-balanced QE storage (QED). QEs occupy this
> - *	space until they are scheduled to a load-balanced CQ. One credit
> - *	represents the storage for one QE.
> - * - num_dir_credits: Amount of directed QE storage (DQED). QEs occupy this
> - *	space until they are scheduled to a directed CQ. One credit represents
> - *	the storage for one QE.
> - * - num_ldb_credit_pools: Number of pools into which the load-balanced credits
> - *	are placed.
> - * - num_dir_credit_pools: Number of pools into which the directed credits are
> - *	placed.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: domain ID.
> - */
> -struct dlb_create_sched_domain_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 num_ldb_queues;
> -	__u32 num_ldb_ports;
> -	__u32 num_dir_ports;
> -	__u32 num_atomic_inflights;
> -	__u32 num_hist_list_entries;
> -	__u32 num_ldb_credits;
> -	__u32 num_dir_credits;
> -	__u32 num_ldb_credit_pools;
> -	__u32 num_dir_credit_pools;
> -};
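
To make the calling convention above concrete: the caller fills in the input fields and stores the address of a struct dlb_cmd_response in the response field before issuing the command. A minimal sketch using locally defined stand-in types; the example_* names and the resource counts are hypothetical:

#include <stdint.h>
#include <stdio.h>

struct example_response {
	uint32_t status;
	uint32_t id;
};

struct example_sched_domain_args {
	uint64_t response;
	uint32_t num_ldb_queues;
	uint32_t num_ldb_ports;
	uint32_t num_dir_ports;
	uint32_t num_atomic_inflights;
	uint32_t num_hist_list_entries;
	uint32_t num_ldb_credits;
	uint32_t num_dir_credits;
	uint32_t num_ldb_credit_pools;
	uint32_t num_dir_credit_pools;
};

int main(void)
{
	struct example_response resp = {0};
	struct example_sched_domain_args args = {0};

	args.response = (uintptr_t)&resp; /* response buffer passed by address */
	args.num_ldb_queues = 2;
	args.num_ldb_ports = 2;
	args.num_dir_ports = 1;
	args.num_atomic_inflights = 64;
	args.num_hist_list_entries = 128;
	args.num_ldb_credits = 2048;
	args.num_dir_credits = 1024;
	args.num_ldb_credit_pools = 1;
	args.num_dir_credit_pools = 1;

	/* A real caller would now hand &args to the device command path and,
	 * once it returns, read resp.status (enum dlb_error) and resp.id.
	 */
	printf("requesting %u LDB queues, %u LDB ports, %u DIR ports\n",
	       (unsigned)args.num_ldb_queues, (unsigned)args.num_ldb_ports,
	       (unsigned)args.num_dir_ports);
	return 0;
}
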
> -
> -/*
> - * DLB_CMD_GET_NUM_RESOURCES: Return the number of available resources
> - *	(queues, ports, etc.) that this device owns.
> - *
> - * Output parameters:
> - * - num_domains: Number of available scheduling domains.
> - * - num_ldb_queues: Number of available load-balanced queues.
> - * - num_ldb_ports: Number of available load-balanced ports.
> - * - num_dir_ports: Number of available directed ports. There is one directed
> - *	queue for every directed port.
> - * - num_atomic_inflights: Amount of available temporary atomic QE storage.
> - * - max_contiguous_atomic_inflights: When a domain is created, the temporary
> - *	atomic QE storage is allocated in a contiguous chunk. This return value
> - *	is the longest available contiguous range of atomic QE storage.
> - * - num_hist_list_entries: Amount of history list storage.
> - * - max_contiguous_hist_list_entries: History list storage is allocated in
> - *	a contiguous chunk, and this return value is the longest available
> - *	contiguous range of history list entries.
> - * - num_ldb_credits: Amount of available load-balanced QE storage.
> - * - max_contiguous_ldb_credits: QED storage is allocated in a contiguous
> - *	chunk, and this return value is the longest available contiguous range
> - *	of load-balanced credit storage.
> - * - num_dir_credits: Amount of available directed QE storage.
> - * - max_contiguous_dir_credits: DQED storage is allocated in a contiguous
> - *	chunk, and this return value is the longest available contiguous range
> - *	of directed credit storage.
> - * - num_ldb_credit_pools: Number of available load-balanced credit pools.
> - * - num_dir_credit_pools: Number of available directed credit pools.
> - * - padding0: Reserved for future use.
> - */
> -struct dlb_get_num_resources_args {
> -	/* Output parameters */
> -	__u32 num_sched_domains;
> -	__u32 num_ldb_queues;
> -	__u32 num_ldb_ports;
> -	__u32 num_dir_ports;
> -	__u32 num_atomic_inflights;
> -	__u32 max_contiguous_atomic_inflights;
> -	__u32 num_hist_list_entries;
> -	__u32 max_contiguous_hist_list_entries;
> -	__u32 num_ldb_credits;
> -	__u32 max_contiguous_ldb_credits;
> -	__u32 num_dir_credits;
> -	__u32 max_contiguous_dir_credits;
> -	__u32 num_ldb_credit_pools;
> -	__u32 num_dir_credit_pools;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_CMD_SET_SN_ALLOCATION: Configure a sequence number group
> - *
> - * Input parameters:
> - * - group: Sequence number group ID.
> - * - num: Number of sequence numbers per queue.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - */
> -struct dlb_set_sn_allocation_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 group;
> -	__u32 num;
> -};
> -
> -/*
> - * DLB_CMD_GET_SN_ALLOCATION: Get a sequence number group's configuration
> - *
> - * Input parameters:
> - * - group: Sequence number group ID.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: Specified group's number of sequence numbers per queue.
> - */
> -struct dlb_get_sn_allocation_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 group;
> -	__u32 padding0;
> -};
> -
> -enum dlb_cq_poll_modes {
> -	DLB_CQ_POLL_MODE_STD,
> -	DLB_CQ_POLL_MODE_SPARSE,
> -
> -	/* NUM_DLB_CQ_POLL_MODE must be last */
> -	NUM_DLB_CQ_POLL_MODE,
> -};
> -
> -/*
> - * DLB_CMD_QUERY_CQ_POLL_MODE: Query the CQ poll mode the kernel driver is using
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: CQ poll mode (see enum dlb_cq_poll_modes).
> - */
> -struct dlb_query_cq_poll_mode_args {
> -	/* Output parameters */
> -	__u64 response;
> -};
> -
> -/*
> - * DLB_CMD_GET_SN_OCCUPANCY: Get a sequence number group's occupancy
> - *
> - * Each sequence number group has one or more slots, depending on its
> - * configuration. I.e.:
> - * - If configured for 1024 sequence numbers per queue, the group has 1 slot
> - * - If configured for 512 sequence numbers per queue, the group has 2 slots
> - *   ...
> - * - If configured for 32 sequence numbers per queue, the group has 32 slots
> - *
> - * This ioctl returns the group's number of in-use slots. If its occupancy is
> - * 0, the group's sequence number allocation can be reconfigured.
> - *
> - * Input parameters:
> - * - group: Sequence number group ID.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: Specified group's number of used slots.
> - */
> -struct dlb_get_sn_occupancy_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 group;
> -	__u32 padding0;
> -};
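The slot math in the comment above works out to a fixed budget of 1024
sequence numbers per group: the number of slots is simply 1024 divided by the
per-queue allocation, which is why a group only becomes reconfigurable once
its occupancy drops to zero. A standalone sanity check of that relationship
(not driver code):

	#include <assert.h>

	/* Per the comment: 1024 SNs/queue -> 1 slot, 512 -> 2, ..., 32 -> 32. */
	static unsigned int dlb_sn_slots(unsigned int sns_per_queue)
	{
		return 1024 / sns_per_queue;
	}

	int main(void)
	{
		assert(dlb_sn_slots(1024) == 1);
		assert(dlb_sn_slots(512) == 2);
		assert(dlb_sn_slots(32) == 32);
		return 0;
	}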
> -
> -/*********************************/
> -/* 'scheduling domain' commands  */
> -/*********************************/
> -
> -/*
> - * DLB_DOMAIN_CMD_CREATE_LDB_POOL: Configure a load-balanced credit pool.
> - * Input parameters:
> - * - num_ldb_credits: Number of load-balanced credits (QED space) for this
> - *	pool.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: pool ID.
> - */
> -struct dlb_create_ldb_pool_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 num_ldb_credits;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_CREATE_DIR_POOL: Configure a directed credit pool.
> - * Input parameters:
> - * - num_dir_credits: Number of directed credits (DQED space) for this pool.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: Pool ID.
> - */
> -struct dlb_create_dir_pool_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 num_dir_credits;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_CREATE_LDB_QUEUE: Configure a load-balanced queue.
> - * Input parameters:
> - * - num_atomic_inflights: This specifies the amount of temporary atomic QE
> - *	storage for this queue. If zero, the queue will not support atomic
> - *	scheduling.
> - * - num_sequence_numbers: This specifies the number of sequence numbers used
> - *	by this queue. If zero, the queue will not support ordered scheduling.
> - *	If non-zero, the queue will not support unordered scheduling.
> - * - num_qid_inflights: The maximum number of QEs that can be inflight
> - *	(scheduled to a CQ but not completed) at any time. If
> - *	num_sequence_numbers is non-zero, num_qid_inflights must be set equal
> - *	to num_sequence_numbers.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: Queue ID.
> - */
> -struct dlb_create_ldb_queue_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 num_sequence_numbers;
> -	__u32 num_qid_inflights;
> -	__u32 num_atomic_inflights;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_CREATE_DIR_QUEUE: Configure a directed queue.
> - * Input parameters:
> - * - port_id: Port ID. If the corresponding directed port is already created,
> - *	specify its ID here. Else this argument must be 0xFFFFFFFF to indicate
> - *	that the queue is being created before the port.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: Queue ID.
> - */
> -struct dlb_create_dir_queue_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__s32 port_id;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_CREATE_LDB_PORT: Configure a load-balanced port.
> - * Input parameters:
> - * - ldb_credit_pool_id: Load-balanced credit pool this port will belong to.
> - * - dir_credit_pool_id: Directed credit pool this port will belong to.
> - * - ldb_credit_high_watermark: Number of load-balanced credits from the pool
> - *	that this port will own.
> - *
> - *	If this port's scheduling domain does not have any load-balanced queues,
> - *	this argument is ignored and the port is given no load-balanced
> - *	credits.
> - * - dir_credit_high_watermark: Number of directed credits from the pool that
> - *	this port will own.
> - *
> - *	If this port's scheduling domain does not have any directed queues,
> - *	this argument is ignored and the port is given no directed credits.
> - * - ldb_credit_low_watermark: Load-balanced credit low watermark. When the
> - *	port's credits reach this watermark, they become eligible to be
> - *	refilled by the DLB as credits until the high watermark
> - *	(num_ldb_credits) is reached.
> - *
> - *	If this port's scheduling domain does not have any load-balanced queues,
> - *	this argument is ignored and the port is given no load-balanced
> - *	credits.
> - * - dir_credit_low_watermark: Directed credit low watermark. When the port's
> - *	credits reach this watermark, they become eligible to be refilled by
> - *	the DLB as credits until the high watermark (num_dir_credits) is
> - *	reached.
> - *
> - *	If this port's scheduling domain does not have any directed queues,
> - *	this argument is ignored and the port is given no directed credits.
> - * - ldb_credit_quantum: Number of load-balanced credits for the DLB to refill
> - *	per refill operation.
> - *
> - *	If this port's scheduling domain does not have any load-balanced queues,
> - *	this argument is ignored and the port is given no load-balanced
> - *	credits.
> - * - dir_credit_quantum: Number of directed credits for the DLB to refill per
> - *	refill operation.
> - *
> - *	If this port's scheduling domain does not have any directed queues,
> - *	this argument is ignored and the port is given no directed credits.
> - * - padding0: Reserved for future use.
> - * - cq_depth: Depth of the port's CQ. Must be a power-of-two between 8 and
> - *	1024, inclusive.
> - * - cq_depth_threshold: CQ depth interrupt threshold. A value of N means that
> - *	the CQ interrupt won't fire until there are N or more outstanding CQ
> - *	tokens.
> - * - cq_history_list_size: Number of history list entries. This must be greater
> - *	than or equal to cq_depth.
> - * - padding1: Reserved for future use.
> - * - padding2: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: port ID.
> - */
> -struct dlb_create_ldb_port_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 ldb_credit_pool_id;
> -	__u32 dir_credit_pool_id;
> -	__u16 ldb_credit_high_watermark;
> -	__u16 ldb_credit_low_watermark;
> -	__u16 ldb_credit_quantum;
> -	__u16 dir_credit_high_watermark;
> -	__u16 dir_credit_low_watermark;
> -	__u16 dir_credit_quantum;
> -	__u16 padding0;
> -	__u16 cq_depth;
> -	__u16 cq_depth_threshold;
> -	__u16 cq_history_list_size;
> -	__u32 padding1;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_CREATE_DIR_PORT: Configure a directed port.
> - * Input parameters:
> - * - ldb_credit_pool_id: Load-balanced credit pool this port will belong to.
> - * - dir_credit_pool_id: Directed credit pool this port will belong to.
> - * - ldb_credit_high_watermark: Number of load-balanced credits from the pool
> - *	that this port will own.
> - *
> - *	If this port's scheduling domain does not have any load-balanced queues,
> - *	this argument is ignored and the port is given no load-balanced
> - *	credits.
> - * - dir_credit_high_watermark: Number of directed credits from the pool that
> - *	this port will own.
> - * - ldb_credit_low_watermark: Load-balanced credit low watermark. When the
> - *	port's credits reach this watermark, they become eligible to be
> - *	refilled by the DLB as credits until the high watermark
> - *	(num_ldb_credits) is reached.
> - *
> - *	If this port's scheduling domain does not have any load-balanced queues,
> - *	this argument is ignored and the port is given no load-balanced
> - *	credits.
> - * - dir_credit_low_watermark: Directed credit low watermark. When the port's
> - *	credits reach this watermark, they become eligible to be refilled by
> - *	the DLB as credits until the high watermark (num_dir_credits) is
> - *	reached.
> - * - ldb_credit_quantum: Number of load-balanced credits for the DLB to refill
> - *	per refill operation.
> - *
> - *	If this port's scheduling domain does not have any load-balanced queues,
> - *	this argument is ignored and the port is given no load-balanced
> - *	credits.
> - * - dir_credit_quantum: Number of directed credits for the DLB to refill per
> - *	refill operation.
> - * - cq_depth: Depth of the port's CQ. Must be a power-of-two between 8 and
> - *	1024, inclusive.
> - * - cq_depth_threshold: CQ depth interrupt threshold. A value of N means that
> - *	the CQ interrupt won't fire until there are N or more outstanding CQ
> - *	tokens.
> - * - qid: Queue ID. If the corresponding directed queue is already created,
> - *	specify its ID here. Else this argument must be 0xFFFFFFFF to indicate
> - *	that the port is being created before the queue.
> - * - padding1: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: Port ID.
> - */
> -struct dlb_create_dir_port_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 ldb_credit_pool_id;
> -	__u32 dir_credit_pool_id;
> -	__u16 ldb_credit_high_watermark;
> -	__u16 ldb_credit_low_watermark;
> -	__u16 ldb_credit_quantum;
> -	__u16 dir_credit_high_watermark;
> -	__u16 dir_credit_low_watermark;
> -	__u16 dir_credit_quantum;
> -	__u16 cq_depth;
> -	__u16 cq_depth_threshold;
> -	__s32 queue_id;
> -	__u32 padding1;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_START_DOMAIN: Mark the end of the domain configuration. This
> - *	must be called before passing QEs into the device, and no configuration
> - *	ioctls can be issued once the domain has started. Sending QEs into the
> - *	device before calling this ioctl will result in undefined behavior.
> - * Input parameters:
> - * - (None)
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - */
> -struct dlb_start_domain_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_MAP_QID: Map a load-balanced queue to a load-balanced port.
> - * Input parameters:
> - * - port_id: Load-balanced port ID.
> - * - qid: Load-balanced queue ID.
> - * - priority: Queue->port service priority.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - */
> -struct dlb_map_qid_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 port_id;
> -	__u32 qid;
> -	__u32 priority;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_UNMAP_QID: Unmap a load-balanced queue to a load-balanced
> - *	port.
> - * Input parameters:
> - * - port_id: Load-balanced port ID.
> - * - qid: Load-balanced queue ID.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - */
> -struct dlb_unmap_qid_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 port_id;
> -	__u32 qid;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_ENABLE_LDB_PORT: Enable scheduling to a load-balanced port.
> - * Input parameters:
> - * - port_id: Load-balanced port ID.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - */
> -struct dlb_enable_ldb_port_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 port_id;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_ENABLE_DIR_PORT: Enable scheduling to a directed port.
> - * Input parameters:
> - * - port_id: Directed port ID.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - */
> -struct dlb_enable_dir_port_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 port_id;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_DISABLE_LDB_PORT: Disable scheduling to a load-balanced port.
> - * Input parameters:
> - * - port_id: Load-balanced port ID.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - */
> -struct dlb_disable_ldb_port_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 port_id;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_DISABLE_DIR_PORT: Disable scheduling to a directed port.
> - * Input parameters:
> - * - port_id: Directed port ID.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - */
> -struct dlb_disable_dir_port_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 port_id;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_GET_LDB_QUEUE_DEPTH: Get a load-balanced queue's depth.
> - * Input parameters:
> - * - queue_id: The load-balanced queue ID.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: queue depth.
> - */
> -struct dlb_get_ldb_queue_depth_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 queue_id;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_GET_DIR_QUEUE_DEPTH: Get a directed queue's depth.
> - * Input parameters:
> - * - queue_id: The directed queue ID.
> - * - padding0: Reserved for future use.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: queue depth.
> - */
> -struct dlb_get_dir_queue_depth_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 queue_id;
> -	__u32 padding0;
> -};
> -
> -/*
> - * DLB_DOMAIN_CMD_PENDING_PORT_UNMAPS: Get number of queue unmap operations in
> - *	progress for a load-balanced port.
> - *
> - *	Note: This is a snapshot; the number of unmap operations in progress
> - *	is subject to change at any time.
> - *
> - * Input parameters:
> - * - port_id: Load-balanced port ID.
> - *
> - * Output parameters:
> - * - response: pointer to a struct dlb_cmd_response.
> - *	response.status: Detailed error code. In certain cases, such as if the
> - *		response pointer is invalid, the driver won't set status.
> - *	response.id: number of unmaps in progress.
> - */
> -struct dlb_pending_port_unmaps_args {
> -	/* Output parameters */
> -	__u64 response;
> -	/* Input parameters */
> -	__u32 port_id;
> -	__u32 padding0;
> -};
> -
> -/*
> - * Base addresses for memory mapping the consumer queue (CQ) and popcount (PC)
> - * memory space, and producer port (PP) MMIO space. The CQ, PC, and PP
> - * addresses are per-port. Every address is page-separated (e.g. LDB PP 0 is at
> - * 0x2100000 and LDB PP 1 is at 0x2101000).
> - */
> -#define DLB_LDB_CQ_BASE 0x3000000
> -#define DLB_LDB_CQ_MAX_SIZE 65536
> -#define DLB_LDB_CQ_OFFS(id) (DLB_LDB_CQ_BASE + (id) * DLB_LDB_CQ_MAX_SIZE)
> -
> -#define DLB_DIR_CQ_BASE 0x3800000
> -#define DLB_DIR_CQ_MAX_SIZE 65536
> -#define DLB_DIR_CQ_OFFS(id) (DLB_DIR_CQ_BASE + (id) * DLB_DIR_CQ_MAX_SIZE)
> -
> -#define DLB_LDB_PC_BASE 0x2300000
> -#define DLB_LDB_PC_MAX_SIZE 4096
> -#define DLB_LDB_PC_OFFS(id) (DLB_LDB_PC_BASE + (id) * DLB_LDB_PC_MAX_SIZE)
> -
> -#define DLB_DIR_PC_BASE 0x2200000
> -#define DLB_DIR_PC_MAX_SIZE 4096
> -#define DLB_DIR_PC_OFFS(id) (DLB_DIR_PC_BASE + (id) * DLB_DIR_PC_MAX_SIZE)
> -
> -#define DLB_LDB_PP_BASE 0x2100000
> -#define DLB_LDB_PP_MAX_SIZE 4096
> -#define DLB_LDB_PP_OFFS(id) (DLB_LDB_PP_BASE + (id) * DLB_LDB_PP_MAX_SIZE)
> -
> -#define DLB_DIR_PP_BASE 0x2000000
> -#define DLB_DIR_PP_MAX_SIZE 4096
> -#define DLB_DIR_PP_OFFS(id) (DLB_DIR_PP_BASE + (id) * DLB_DIR_PP_MAX_SIZE)
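These regions are laid out with one page (4 KiB) per port for the PP/PC
spaces and 64 KiB per port for the CQ space, so the *_OFFS() macros reproduce
the per-port addresses quoted in the comment above. A quick standalone check:

	#include <assert.h>

	#define DLB_LDB_PP_BASE 0x2100000
	#define DLB_LDB_PP_MAX_SIZE 4096
	#define DLB_LDB_PP_OFFS(id) (DLB_LDB_PP_BASE + (id) * DLB_LDB_PP_MAX_SIZE)

	int main(void)
	{
		/* Matches the comment: LDB PP 0 at 0x2100000, LDB PP 1 at 0x2101000. */
		assert(DLB_LDB_PP_OFFS(0) == 0x2100000);
		assert(DLB_LDB_PP_OFFS(1) == 0x2101000);
		return 0;
	}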
> -
> -#endif /* __DLB_USER_H */
> diff --git a/drivers/event/dlb/dlb_xstats.c b/drivers/event/dlb/dlb_xstats.c
> deleted file mode 100644
> index 5f4c59030..000000000
> --- a/drivers/event/dlb/dlb_xstats.c
> +++ /dev/null
> @@ -1,1212 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#include <stdint.h>
> -#include <inttypes.h>
> -
> -#include "dlb_priv.h"
> -#include "dlb_inline_fns.h"
> -
> -enum dlb_xstats_type {
> -	/* common to device and port */
> -	rx_ok,				/**< Receive an event */
> -	rx_drop,                        /**< Error bit set in received QE */
> -	rx_interrupt_wait,		/**< Wait on an interrupt */
> -	rx_umonitor_umwait,		/**< Block using umwait */
> -	tx_ok,				/**< Transmit an event */
> -	total_polls,			/**< Call dequeue_burst */
> -	zero_polls,			/**< Call dequeue burst and return 0 */
> -	tx_nospc_ldb_hw_credits,	/**< Insufficient LDB h/w credits */
> -	tx_nospc_dir_hw_credits,	/**< Insufficient DIR h/w credits */
> -	tx_nospc_inflight_max,		/**< Reach the new_event_threshold */
> -	tx_nospc_new_event_limit,	/**< Insufficient s/w credits */
> -	tx_nospc_inflight_credits,	/**< Port has too few s/w credits */
> -	/* device specific */
> -	nb_events_limit,		/**< Maximum num of events */
> -	inflight_events,		/**< Current num events outstanding */
> -	ldb_pool_size,			/**< Num load balanced credits */
> -	dir_pool_size,			/**< Num directed credits */
> -	/* port specific */
> -	tx_new,				/**< Send an OP_NEW event */
> -	tx_fwd,				/**< Send an OP_FORWARD event */
> -	tx_rel,				/**< Send an OP_RELEASE event */
> -	tx_implicit_rel,		/**< Issue an implicit event release */
> -	tx_sched_ordered,		/**< Send a SCHED_TYPE_ORDERED event */
> -	tx_sched_unordered,		/**< Send a SCHED_TYPE_PARALLEL event */
> -	tx_sched_atomic,		/**< Send a SCHED_TYPE_ATOMIC event */
> -	tx_sched_directed,		/**< Send a directed event */
> -	tx_invalid,                     /**< Send an event with an invalid op */
> -	outstanding_releases,		/**< # of releases a port owes */
> -	max_outstanding_releases,	/**< max # of releases a port can owe */
> -	rx_sched_ordered,		/**< Dequeue an ordered event */
> -	rx_sched_unordered,		/**< Dequeue an unordered event */
> -	rx_sched_atomic,		/**< Dequeue an atomic event */
> -	rx_sched_directed,		/**< Dequeue an directed event */
> -	rx_sched_invalid,               /**< Dequeue event sched type invalid */
> -	/* common to port and queue */
> -	is_configured,			/**< Port is configured */
> -	is_load_balanced,		/**< Port is LDB */
> -	hw_id,				/**< Hardware ID */
> -	/* queue specific */
> -	num_links,			/**< Number of ports linked */
> -	sched_type,			/**< Queue sched type */
> -	enq_ok,				/**< # events enqueued to the queue */
> -	current_depth			/**< Current queue depth */
> -};
> -
> -typedef uint64_t (*dlb_xstats_fn)(struct dlb_eventdev *dlb,
> -		uint16_t obj_idx, /* port or queue id */
> -		enum dlb_xstats_type stat, int extra_arg);
> -
> -enum dlb_xstats_fn_type {
> -	DLB_XSTATS_FN_DEV,
> -	DLB_XSTATS_FN_PORT,
> -	DLB_XSTATS_FN_QUEUE
> -};
> -
> -struct dlb_xstats_entry {
> -	struct rte_event_dev_xstats_name name;
> -	uint64_t reset_value; /* an offset to be taken away to emulate resets */
> -	enum dlb_xstats_fn_type fn_id;
> -	enum dlb_xstats_type stat;
> -	enum rte_event_dev_xstats_mode mode;
> -	int extra_arg;
> -	uint16_t obj_idx;
> -	uint8_t reset_allowed; /* when set, this value can be reset */
> -};
> -
> -/* Some device stats are simply a summation of the corresponding port values */
> -static uint64_t
> -dlb_device_traffic_stat_get(struct dlb_eventdev *dlb, int which_stat)
> -{
> -	int i;
> -	uint64_t val = 0;
> -
> -	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
> -		struct dlb_eventdev_port *port = &dlb->ev_ports[i];
> -
> -		if (!port->setup_done)
> -			continue;
> -
> -		switch (which_stat) {
> -		case rx_ok:
> -			val += port->stats.traffic.rx_ok;
> -			break;
> -		case rx_drop:
> -			val += port->stats.traffic.rx_drop;
> -			break;
> -		case rx_interrupt_wait:
> -			val += port->stats.traffic.rx_interrupt_wait;
> -			break;
> -		case rx_umonitor_umwait:
> -			val += port->stats.traffic.rx_umonitor_umwait;
> -			break;
> -		case tx_ok:
> -			val += port->stats.traffic.tx_ok;
> -			break;
> -		case total_polls:
> -			val += port->stats.traffic.total_polls;
> -			break;
> -		case zero_polls:
> -			val += port->stats.traffic.zero_polls;
> -			break;
> -		case tx_nospc_ldb_hw_credits:
> -			val += port->stats.traffic.tx_nospc_ldb_hw_credits;
> -			break;
> -		case tx_nospc_dir_hw_credits:
> -			val += port->stats.traffic.tx_nospc_dir_hw_credits;
> -			break;
> -		case tx_nospc_inflight_max:
> -			val += port->stats.traffic.tx_nospc_inflight_max;
> -			break;
> -		case tx_nospc_new_event_limit:
> -			val += port->stats.traffic.tx_nospc_new_event_limit;
> -			break;
> -		case tx_nospc_inflight_credits:
> -			val += port->stats.traffic.tx_nospc_inflight_credits;
> -			break;
> -		default:
> -			return -1;
> -		}
> -	}
> -	return val;
> -}
> -
> -static uint64_t
> -get_dev_stat(struct dlb_eventdev *dlb, uint16_t obj_idx __rte_unused,
> -	     enum dlb_xstats_type type, int extra_arg __rte_unused)
> -{
> -	switch (type) {
> -	case rx_ok:
> -	case rx_drop:
> -	case rx_interrupt_wait:
> -	case rx_umonitor_umwait:
> -	case tx_ok:
> -	case total_polls:
> -	case zero_polls:
> -	case tx_nospc_ldb_hw_credits:
> -	case tx_nospc_dir_hw_credits:
> -	case tx_nospc_inflight_max:
> -	case tx_nospc_new_event_limit:
> -	case tx_nospc_inflight_credits:
> -		return dlb_device_traffic_stat_get(dlb, type);
> -	case nb_events_limit:
> -		return dlb->new_event_limit;
> -	case inflight_events:
> -		return __atomic_load_n(&dlb->inflights, __ATOMIC_SEQ_CST);
> -	case ldb_pool_size:
> -		return dlb->num_ldb_credits;
> -	case dir_pool_size:
> -		return dlb->num_dir_credits;
> -	default: return -1;
> -	}
> -}
> -
> -static uint64_t
> -get_port_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
> -	      enum dlb_xstats_type type, int extra_arg __rte_unused)
> -{
> -	struct dlb_eventdev_port *ev_port = &dlb->ev_ports[obj_idx];
> -
> -	switch (type) {
> -	case rx_ok: return ev_port->stats.traffic.rx_ok;
> -
> -	case rx_drop: return ev_port->stats.traffic.rx_drop;
> -
> -	case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;
> -
> -	case rx_umonitor_umwait:
> -		return ev_port->stats.traffic.rx_umonitor_umwait;
> -
> -	case tx_ok: return ev_port->stats.traffic.tx_ok;
> -
> -	case total_polls: return ev_port->stats.traffic.total_polls;
> -
> -	case zero_polls: return ev_port->stats.traffic.zero_polls;
> -
> -	case tx_nospc_ldb_hw_credits:
> -		return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;
> -
> -	case tx_nospc_dir_hw_credits:
> -		return ev_port->stats.traffic.tx_nospc_dir_hw_credits;
> -
> -	case tx_nospc_inflight_max:
> -		return ev_port->stats.traffic.tx_nospc_inflight_max;
> -
> -	case tx_nospc_new_event_limit:
> -		return ev_port->stats.traffic.tx_nospc_new_event_limit;
> -
> -	case tx_nospc_inflight_credits:
> -		return ev_port->stats.traffic.tx_nospc_inflight_credits;
> -
> -	case is_configured: return ev_port->setup_done;
> -
> -	case is_load_balanced: return !ev_port->qm_port.is_directed;
> -
> -	case hw_id: return ev_port->qm_port.id;
> -
> -	case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];
> -
> -	case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];
> -
> -	case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];
> -
> -	case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;
> -
> -	case tx_sched_ordered:
> -		return ev_port->stats.tx_sched_cnt[DLB_SCHED_ORDERED];
> -
> -	case tx_sched_unordered:
> -		return ev_port->stats.tx_sched_cnt[DLB_SCHED_UNORDERED];
> -
> -	case tx_sched_atomic:
> -		return ev_port->stats.tx_sched_cnt[DLB_SCHED_ATOMIC];
> -
> -	case tx_sched_directed:
> -		return ev_port->stats.tx_sched_cnt[DLB_SCHED_DIRECTED];
> -
> -	case tx_invalid: return ev_port->stats.tx_invalid;
> -
> -	case outstanding_releases: return ev_port->outstanding_releases;
> -
> -	case max_outstanding_releases:
> -		return DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
> -
> -	case rx_sched_ordered:
> -		return ev_port->stats.rx_sched_cnt[DLB_SCHED_ORDERED];
> -
> -	case rx_sched_unordered:
> -		return ev_port->stats.rx_sched_cnt[DLB_SCHED_UNORDERED];
> -
> -	case rx_sched_atomic:
> -		return ev_port->stats.rx_sched_cnt[DLB_SCHED_ATOMIC];
> -
> -	case rx_sched_directed:
> -		return ev_port->stats.rx_sched_cnt[DLB_SCHED_DIRECTED];
> -
> -	case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;
> -
> -	default: return -1;
> -	}
> -}
> -
> -static uint64_t
> -get_queue_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
> -	       enum dlb_xstats_type type, int extra_arg __rte_unused)
> -{
> -	struct dlb_eventdev_queue *ev_queue = &dlb->ev_queues[obj_idx];
> -
> -	switch (type) {
> -	case is_configured: return ev_queue->setup_done;
> -
> -	case is_load_balanced: return !ev_queue->qm_queue.is_directed;
> -
> -	case hw_id: return ev_queue->qm_queue.id;
> -
> -	case num_links: return ev_queue->num_links;
> -
> -	case sched_type: return ev_queue->qm_queue.sched_type;
> -
> -	case enq_ok:
> -	{
> -		int port_count = 0;
> -		uint64_t enq_ok_tally = 0;
> -
> -		ev_queue->enq_ok = 0;
> -		for (port_count = 0; port_count < DLB_MAX_NUM_PORTS;
> -		     port_count++) {
> -			struct dlb_eventdev_port *ev_port =
> -				&dlb->ev_ports[port_count];
> -			enq_ok_tally += ev_port->stats.enq_ok[ev_queue->id];
> -		}
> -		ev_queue->enq_ok = enq_ok_tally;
> -		return ev_queue->enq_ok;
> -	}
> -
> -	case current_depth: return dlb_get_queue_depth(dlb, ev_queue);
> -
> -	default: return -1;
> -	}
> -}
> -
> -int
> -dlb_xstats_init(struct dlb_eventdev *dlb)
> -{
> -	/*
> -	 * define the stats names and types. Used to build up the device
> -	 * xstats array
> -	 * There are multiple set of stats:
> -	 *   - device-level,
> -	 *   - per-port,
> -	 *   - per-qid,
> -	 *
> -	 * For each of these sets, we have three parallel arrays, one for the
> -	 * names, the other for the stat type parameter to be passed in the fn
> -	 * call to get that stat. The third array allows resetting or not.
> -	 * All these arrays must be kept in sync
> -	 */
> -	static const char * const dev_stats[] = {
> -		"rx_ok",
> -		"rx_drop",
> -		"rx_interrupt_wait",
> -		"rx_umonitor_umwait",
> -		"tx_ok",
> -		"total_polls",
> -		"zero_polls",
> -		"tx_nospc_ldb_hw_credits",
> -		"tx_nospc_dir_hw_credits",
> -		"tx_nospc_inflight_max",
> -		"tx_nospc_new_event_limit",
> -		"tx_nospc_inflight_credits",
> -		"nb_events_limit",
> -		"inflight_events",
> -		"ldb_pool_size",
> -		"dir_pool_size",
> -	};
> -	static const enum dlb_xstats_type dev_types[] = {
> -		rx_ok,
> -		rx_drop,
> -		rx_interrupt_wait,
> -		rx_umonitor_umwait,
> -		tx_ok,
> -		total_polls,
> -		zero_polls,
> -		tx_nospc_ldb_hw_credits,
> -		tx_nospc_dir_hw_credits,
> -		tx_nospc_inflight_max,
> -		tx_nospc_new_event_limit,
> -		tx_nospc_inflight_credits,
> -		nb_events_limit,
> -		inflight_events,
> -		ldb_pool_size,
> -		dir_pool_size,
> -	};
> -	/* Note: generated device stats are not allowed to be reset. */
> -	static const uint8_t dev_reset_allowed[] = {
> -		0, /* rx_ok */
> -		0, /* rx_drop */
> -		0, /* rx_interrupt_wait */
> -		0, /* rx_umonitor_umwait */
> -		0, /* tx_ok */
> -		0, /* total_polls */
> -		0, /* zero_polls */
> -		0, /* tx_nospc_ldb_hw_credits */
> -		0, /* tx_nospc_dir_hw_credits */
> -		0, /* tx_nospc_inflight_max */
> -		0, /* tx_nospc_new_event_limit */
> -		0, /* tx_nospc_inflight_credits */
> -		0, /* nb_events_limit */
> -		0, /* inflight_events */
> -		0, /* ldb_pool_size */
> -		0, /* dir_pool_size */
> -	};
> -	static const char * const port_stats[] = {
> -		"is_configured",
> -		"is_load_balanced",
> -		"hw_id",
> -		"rx_ok",
> -		"rx_drop",
> -		"rx_interrupt_wait",
> -		"rx_umonitor_umwait",
> -		"tx_ok",
> -		"total_polls",
> -		"zero_polls",
> -		"tx_nospc_ldb_hw_credits",
> -		"tx_nospc_dir_hw_credits",
> -		"tx_nospc_inflight_max",
> -		"tx_nospc_new_event_limit",
> -		"tx_nospc_inflight_credits",
> -		"tx_new",
> -		"tx_fwd",
> -		"tx_rel",
> -		"tx_implicit_rel",
> -		"tx_sched_ordered",
> -		"tx_sched_unordered",
> -		"tx_sched_atomic",
> -		"tx_sched_directed",
> -		"tx_invalid",
> -		"outstanding_releases",
> -		"max_outstanding_releases",
> -		"rx_sched_ordered",
> -		"rx_sched_unordered",
> -		"rx_sched_atomic",
> -		"rx_sched_directed",
> -		"rx_sched_invalid"
> -	};
> -	static const enum dlb_xstats_type port_types[] = {
> -		is_configured,
> -		is_load_balanced,
> -		hw_id,
> -		rx_ok,
> -		rx_drop,
> -		rx_interrupt_wait,
> -		rx_umonitor_umwait,
> -		tx_ok,
> -		total_polls,
> -		zero_polls,
> -		tx_nospc_ldb_hw_credits,
> -		tx_nospc_dir_hw_credits,
> -		tx_nospc_inflight_max,
> -		tx_nospc_new_event_limit,
> -		tx_nospc_inflight_credits,
> -		tx_new,
> -		tx_fwd,
> -		tx_rel,
> -		tx_implicit_rel,
> -		tx_sched_ordered,
> -		tx_sched_unordered,
> -		tx_sched_atomic,
> -		tx_sched_directed,
> -		tx_invalid,
> -		outstanding_releases,
> -		max_outstanding_releases,
> -		rx_sched_ordered,
> -		rx_sched_unordered,
> -		rx_sched_atomic,
> -		rx_sched_directed,
> -		rx_sched_invalid
> -	};
> -	static const uint8_t port_reset_allowed[] = {
> -		0, /* is_configured */
> -		0, /* is_load_balanced */
> -		0, /* hw_id */
> -		1, /* rx_ok */
> -		1, /* rx_drop */
> -		1, /* rx_interrupt_wait */
> -		1, /* rx_umonitor_umwait */
> -		1, /* tx_ok */
> -		1, /* total_polls */
> -		1, /* zero_polls */
> -		1, /* tx_nospc_ldb_hw_credits */
> -		1, /* tx_nospc_dir_hw_credits */
> -		1, /* tx_nospc_inflight_max */
> -		1, /* tx_nospc_new_event_limit */
> -		1, /* tx_nospc_inflight_credits */
> -		1, /* tx_new */
> -		1, /* tx_fwd */
> -		1, /* tx_rel */
> -		1, /* tx_implicit_rel */
> -		1, /* tx_sched_ordered */
> -		1, /* tx_sched_unordered */
> -		1, /* tx_sched_atomic */
> -		1, /* tx_sched_directed */
> -		1, /* tx_invalid */
> -		0, /* outstanding_releases */
> -		0, /* max_outstanding_releases */
> -		1, /* rx_sched_ordered */
> -		1, /* rx_sched_unordered */
> -		1, /* rx_sched_atomic */
> -		1, /* rx_sched_directed */
> -		1  /* rx_sched_invalid */
> -	};
> -
> -	/* QID specific stats */
> -	static const char * const qid_stats[] = {
> -		"is_configured",
> -		"is_load_balanced",
> -		"hw_id",
> -		"num_links",
> -		"sched_type",
> -		"enq_ok",
> -		"current_depth",
> -	};
> -	static const enum dlb_xstats_type qid_types[] = {
> -		is_configured,
> -		is_load_balanced,
> -		hw_id,
> -		num_links,
> -		sched_type,
> -		enq_ok,
> -		current_depth,
> -	};
> -	static const uint8_t qid_reset_allowed[] = {
> -		0, /* is_configured */
> -		0, /* is_load_balanced */
> -		0, /* hw_id */
> -		0, /* num_links */
> -		0, /* sched_type */
> -		1, /* enq_ok */
> -		0, /* current_depth */
> -	};
> -
> -	/* ---- end of stat definitions ---- */
> -
> -	/* check sizes, since a missed comma can lead to strings being
> -	 * joined by the compiler.
> -	 */
> -	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
> -	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
> -	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
> -
> -	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
> -	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
> -	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
> -
> -	/* other vars */
> -	const unsigned int count = RTE_DIM(dev_stats) +
> -			DLB_MAX_NUM_PORTS * RTE_DIM(port_stats) +
> -			DLB_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
> -	unsigned int i, port, qid, stat_id = 0;
> -
> -	dlb->xstats = rte_zmalloc_socket(NULL,
> -					 sizeof(dlb->xstats[0]) * count, 0,
> -					 dlb->qm_instance.info.socket_id);
> -	if (dlb->xstats == NULL)
> -		return -ENOMEM;
> -
> -#define sname dlb->xstats[stat_id].name.name
> -	for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
> -		dlb->xstats[stat_id] = (struct dlb_xstats_entry) {
> -			.fn_id = DLB_XSTATS_FN_DEV,
> -			.stat = dev_types[i],
> -			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
> -			.reset_allowed = dev_reset_allowed[i],
> -		};
> -		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
> -	}
> -	dlb->xstats_count_mode_dev = stat_id;
> -
> -	for (port = 0; port < DLB_MAX_NUM_PORTS; port++) {
> -		uint32_t count_offset = stat_id;
> -
> -		dlb->xstats_offset_for_port[port] = stat_id;
> -
> -		for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
> -			dlb->xstats[stat_id] = (struct dlb_xstats_entry){
> -				.fn_id = DLB_XSTATS_FN_PORT,
> -				.obj_idx = port,
> -				.stat = port_types[i],
> -				.mode = RTE_EVENT_DEV_XSTATS_PORT,
> -				.reset_allowed = port_reset_allowed[i],
> -			};
> -			snprintf(sname, sizeof(sname), "port_%u_%s",
> -				 port, port_stats[i]);
> -		}
> -
> -		dlb->xstats_count_per_port[port] = stat_id - count_offset;
> -	}
> -
> -	dlb->xstats_count_mode_port = stat_id - dlb->xstats_count_mode_dev;
> -
> -	for (qid = 0; qid < DLB_MAX_NUM_QUEUES; qid++) {
> -		uint32_t count_offset = stat_id;
> -
> -		dlb->xstats_offset_for_qid[qid] = stat_id;
> -
> -		for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
> -			dlb->xstats[stat_id] = (struct dlb_xstats_entry){
> -				.fn_id = DLB_XSTATS_FN_QUEUE,
> -				.obj_idx = qid,
> -				.stat = qid_types[i],
> -				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
> -				.reset_allowed = qid_reset_allowed[i],
> -			};
> -			snprintf(sname, sizeof(sname), "qid_%u_%s",
> -				 qid, qid_stats[i]);
> -		}
> -
> -		dlb->xstats_count_per_qid[qid] = stat_id - count_offset;
> -	}
> -
> -	dlb->xstats_count_mode_queue = stat_id -
> -		(dlb->xstats_count_mode_dev + dlb->xstats_count_mode_port);
> -#undef sname
> -
> -	dlb->xstats_count = stat_id;
> -
> -	return 0;
> -}
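The table built here is one flat array: device-level entries first, then a
fixed-size block per port, then one per queue, with xstats_offset_for_port[]
and xstats_offset_for_qid[] recording where each block starts. A toy version
of that indexing, using made-up stat counts purely to illustrate the layout
logic of the loops above:

	#include <stdio.h>

	#define NUM_DEV_STATS  16 /* illustrative counts only */
	#define NUM_PORT_STATS 31
	#define NUM_PORTS       4

	int main(void)
	{
		unsigned int offset_for_port[NUM_PORTS];
		unsigned int stat_id = NUM_DEV_STATS; /* dev entries occupy [0, 16) */
		unsigned int port;

		for (port = 0; port < NUM_PORTS; port++) {
			offset_for_port[port] = stat_id;
			stat_id += NUM_PORT_STATS;
		}

		/* Port 2's block starts after the dev stats and two port blocks. */
		printf("port 2 starts at id %u\n", offset_for_port[2]); /* 16 + 2*31 = 78 */
		return 0;
	}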
> -
> -void
> -dlb_xstats_uninit(struct dlb_eventdev *dlb)
> -{
> -	rte_free(dlb->xstats);
> -	dlb->xstats_count = 0;
> -}
> -
> -int
> -dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
> -		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
> -		struct rte_event_dev_xstats_name *xstats_names,
> -		unsigned int *ids, unsigned int size)
> -{
> -	const struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	unsigned int i;
> -	unsigned int xidx = 0;
> -	uint32_t xstats_mode_count = 0;
> -	uint32_t start_offset = 0;
> -
> -	switch (mode) {
> -	case RTE_EVENT_DEV_XSTATS_DEVICE:
> -		xstats_mode_count = dlb->xstats_count_mode_dev;
> -		break;
> -	case RTE_EVENT_DEV_XSTATS_PORT:
> -		if (queue_port_id >= DLB_MAX_NUM_PORTS)
> -			break;
> -		xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
> -		start_offset = dlb->xstats_offset_for_port[queue_port_id];
> -		break;
> -	case RTE_EVENT_DEV_XSTATS_QUEUE:
> -#if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
> -		if (queue_port_id >= DLB_MAX_NUM_QUEUES)
> -			break;
> -#endif
> -		xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
> -		start_offset = dlb->xstats_offset_for_qid[queue_port_id];
> -		break;
> -	default:
> -		return -EINVAL;
> -	};
> -
> -	if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
> -		return xstats_mode_count;
> -
> -	for (i = 0; i < dlb->xstats_count && xidx < size; i++) {
> -		if (dlb->xstats[i].mode != mode)
> -			continue;
> -
> -		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
> -		    queue_port_id != dlb->xstats[i].obj_idx)
> -			continue;
> -
> -		xstats_names[xidx] = dlb->xstats[i].name;
> -		if (ids)
> -			ids[xidx] = start_offset + xidx;
> -		xidx++;
> -	}
> -	return xidx;
> -}
> -
> -static int
> -dlb_xstats_update(struct dlb_eventdev *dlb,
> -		enum rte_event_dev_xstats_mode mode,
> -		uint8_t queue_port_id, const unsigned int ids[],
> -		uint64_t values[], unsigned int n, const uint32_t reset)
> -{
> -	unsigned int i;
> -	unsigned int xidx = 0;
> -	uint32_t xstats_mode_count = 0;
> -
> -	switch (mode) {
> -	case RTE_EVENT_DEV_XSTATS_DEVICE:
> -		xstats_mode_count = dlb->xstats_count_mode_dev;
> -		break;
> -	case RTE_EVENT_DEV_XSTATS_PORT:
> -		if (queue_port_id >= DLB_MAX_NUM_PORTS)
> -			goto invalid_value;
> -		xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
> -		break;
> -	case RTE_EVENT_DEV_XSTATS_QUEUE:
> -#if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
> -		if (queue_port_id >= DLB_MAX_NUM_QUEUES)
> -			goto invalid_value;
> -#endif
> -		xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
> -		break;
> -	default:
> -		goto invalid_value;
> -	};
> -
> -	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
> -		struct dlb_xstats_entry *xs = &dlb->xstats[ids[i]];
> -		dlb_xstats_fn fn;
> -
> -		if (ids[i] > dlb->xstats_count || xs->mode != mode)
> -			continue;
> -
> -		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
> -		    queue_port_id != xs->obj_idx)
> -			continue;
> -
> -		switch (xs->fn_id) {
> -		case DLB_XSTATS_FN_DEV:
> -			fn = get_dev_stat;
> -			break;
> -		case DLB_XSTATS_FN_PORT:
> -			fn = get_port_stat;
> -			break;
> -		case DLB_XSTATS_FN_QUEUE:
> -			fn = get_queue_stat;
> -			break;
> -		default:
> -			DLB_LOG_ERR("Unexpected xstat fn_id %d\n",
> -				     xs->fn_id);
> -			return -EINVAL;
> -		}
> -
> -		uint64_t val = fn(dlb, xs->obj_idx, xs->stat,
> -				  xs->extra_arg) - xs->reset_value;
> -
> -		if (values)
> -			values[xidx] = val;
> -
> -		if (xs->reset_allowed && reset)
> -			xs->reset_value += val;
> -
> -		xidx++;
> -	}
> -
> -	return xidx;
> -
> -invalid_value:
> -	return -EINVAL;
> -}
> -
> -int
> -dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
> -		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
> -		const unsigned int ids[], uint64_t values[], unsigned int n)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	const uint32_t reset = 0;
> -
> -	return dlb_xstats_update(dlb, mode, queue_port_id, ids, values, n,
> -				  reset);
> -}
> -
> -uint64_t
> -dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
> -				const char *name, unsigned int *id)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	unsigned int i;
> -	dlb_xstats_fn fn;
> -
> -	for (i = 0; i < dlb->xstats_count; i++) {
> -		struct dlb_xstats_entry *xs = &dlb->xstats[i];
> -
> -		if (strncmp(xs->name.name, name,
> -			    RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
> -			if (id != NULL)
> -				*id = i;
> -
> -			switch (xs->fn_id) {
> -			case DLB_XSTATS_FN_DEV:
> -				fn = get_dev_stat;
> -				break;
> -			case DLB_XSTATS_FN_PORT:
> -				fn = get_port_stat;
> -				break;
> -			case DLB_XSTATS_FN_QUEUE:
> -				fn = get_queue_stat;
> -				break;
> -			default:
> -				DLB_LOG_ERR("Unexpected xstat fn_id %d\n",
> -					    xs->fn_id);
> -				return (uint64_t)-1;
> -			}
> -
> -			return fn(dlb, xs->obj_idx, xs->stat,
> -				  xs->extra_arg) - xs->reset_value;
> -		}
> -	}
> -	if (id != NULL)
> -		*id = (uint32_t)-1;
> -	return (uint64_t)-1;
> -}
> -
> -static void
> -dlb_xstats_reset_range(struct dlb_eventdev *dlb, uint32_t start,
> -		       uint32_t num)
> -{
> -	uint32_t i;
> -	dlb_xstats_fn fn;
> -
> -	for (i = start; i < start + num; i++) {
> -		struct dlb_xstats_entry *xs = &dlb->xstats[i];
> -
> -		if (!xs->reset_allowed)
> -			continue;
> -
> -		switch (xs->fn_id) {
> -		case DLB_XSTATS_FN_DEV:
> -			fn = get_dev_stat;
> -			break;
> -		case DLB_XSTATS_FN_PORT:
> -			fn = get_port_stat;
> -			break;
> -		case DLB_XSTATS_FN_QUEUE:
> -			fn = get_queue_stat;
> -			break;
> -		default:
> -			DLB_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
> -			return;
> -		}
> -
> -		uint64_t val = fn(dlb, xs->obj_idx, xs->stat, xs->extra_arg);
> -		xs->reset_value = val;
> -	}
> -}
> -
> -static int
> -dlb_xstats_reset_queue(struct dlb_eventdev *dlb, uint8_t queue_id,
> -		       const uint32_t ids[], uint32_t nb_ids)
> -{
> -	const uint32_t reset = 1;
> -
> -	if (ids) {
> -		uint32_t nb_reset = dlb_xstats_update(dlb,
> -					RTE_EVENT_DEV_XSTATS_QUEUE,
> -					queue_id, ids, NULL, nb_ids,
> -					reset);
> -		return nb_reset == nb_ids ? 0 : -EINVAL;
> -	}
> -
> -	if (ids == NULL)
> -		dlb_xstats_reset_range(dlb,
> -				       dlb->xstats_offset_for_qid[queue_id],
> -				       dlb->xstats_count_per_qid[queue_id]);
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_xstats_reset_port(struct dlb_eventdev *dlb, uint8_t port_id,
> -		      const uint32_t ids[], uint32_t nb_ids)
> -{
> -	const uint32_t reset = 1;
> -	int offset = dlb->xstats_offset_for_port[port_id];
> -	int nb_stat = dlb->xstats_count_per_port[port_id];
> -
> -	if (ids) {
> -		uint32_t nb_reset = dlb_xstats_update(dlb,
> -					RTE_EVENT_DEV_XSTATS_PORT, port_id,
> -					ids, NULL, nb_ids,
> -					reset);
> -		return nb_reset == nb_ids ? 0 : -EINVAL;
> -	}
> -
> -	dlb_xstats_reset_range(dlb, offset, nb_stat);
> -	return 0;
> -}
> -
> -static int
> -dlb_xstats_reset_dev(struct dlb_eventdev *dlb, const uint32_t ids[],
> -		     uint32_t nb_ids)
> -{
> -	uint32_t i;
> -
> -	if (ids) {
> -		for (i = 0; i < nb_ids; i++) {
> -			uint32_t id = ids[i];
> -
> -			if (id >= dlb->xstats_count_mode_dev)
> -				return -EINVAL;
> -			dlb_xstats_reset_range(dlb, id, 1);
> -		}
> -	} else {
> -		for (i = 0; i < dlb->xstats_count_mode_dev; i++)
> -			dlb_xstats_reset_range(dlb, i, 1);
> -	}
> -
> -	return 0;
> -}
> -
> -int
> -dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
> -			  enum rte_event_dev_xstats_mode mode,
> -			  int16_t queue_port_id,
> -			  const uint32_t ids[],
> -			  uint32_t nb_ids)
> -{
> -	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> -	uint32_t i;
> -
> -	/* handle -1 for queue_port_id here, looping over all ports/queues */
> -	switch (mode) {
> -	case RTE_EVENT_DEV_XSTATS_DEVICE:
> -		if (dlb_xstats_reset_dev(dlb, ids, nb_ids))
> -			return -EINVAL;
> -		break;
> -	case RTE_EVENT_DEV_XSTATS_PORT:
> -		if (queue_port_id == -1) {
> -			for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
> -				if (dlb_xstats_reset_port(dlb, i, ids,
> -							  nb_ids))
> -					return -EINVAL;
> -			}
> -		} else if (queue_port_id < DLB_MAX_NUM_PORTS) {
> -			if (dlb_xstats_reset_port(dlb, queue_port_id, ids,
> -						  nb_ids))
> -				return -EINVAL;
> -		} else {
> -			return -EINVAL;
> -		}
> -		break;
> -	case RTE_EVENT_DEV_XSTATS_QUEUE:
> -		if (queue_port_id == -1) {
> -			for (i = 0; i < DLB_MAX_NUM_QUEUES; i++) {
> -				if (dlb_xstats_reset_queue(dlb, i, ids,
> -							   nb_ids))
> -					return -EINVAL;
> -			}
> -		} else if (queue_port_id < DLB_MAX_NUM_QUEUES) {
> -			if (dlb_xstats_reset_queue(dlb, queue_port_id, ids,
> -						   nb_ids))
> -				return -EINVAL;
> -		} else {
> -			return -EINVAL;
> -		}
> -		break;
> -	};
> -
> -	return 0;
> -}
> -
> -void
> -dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f)
> -{
> -	struct dlb_eventdev *dlb;
> -	struct dlb_hw_dev *handle;
> -	int i;
> -
> -	dlb = dlb_pmd_priv(dev);
> -
> -	if (dlb == NULL) {
> -		fprintf(f, "DLB Event device cannot be dumped!\n");
> -		return;
> -	}
> -
> -	if (!dlb->configured)
> -		fprintf(f, "DLB Event device is not configured\n");
> -
> -	handle = &dlb->qm_instance;
> -
> -	fprintf(f, "================\n");
> -	fprintf(f, "DLB Device Dump\n");
> -	fprintf(f, "================\n");
> -
> -	fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
> -		dlb->umwait_allowed ? "yes" : "no");
> -
> -	/* Generic top level device information */
> -
> -	fprintf(f, "device is configured and run state =");
> -	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
> -		fprintf(f, "STOPPED\n");
> -	else if (dlb->run_state == DLB_RUN_STATE_STOPPING)
> -		fprintf(f, "STOPPING\n");
> -	else if (dlb->run_state == DLB_RUN_STATE_STARTING)
> -		fprintf(f, "STARTING\n");
> -	else if (dlb->run_state == DLB_RUN_STATE_STARTED)
> -		fprintf(f, "STARTED\n");
> -	else
> -		fprintf(f, "UNEXPECTED\n");
> -
> -	fprintf(f,
> -		"dev ID=%d, dom ID=%u, sock=%u, evdev=%p\n",
> -		handle->device_id, handle->domain_id,
> -		handle->info.socket_id, dlb->event_dev);
> -
> -	fprintf(f, "num dir ports=%u, num dir queues=%u\n",
> -		dlb->num_dir_ports, dlb->num_dir_queues);
> -
> -	fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
> -		dlb->num_ldb_ports, dlb->num_ldb_queues);
> -
> -	fprintf(f, "dir_credit_pool_id=%u, num_credits=%u\n",
> -		handle->cfg.dir_credit_pool_id, handle->cfg.num_dir_credits);
> -
> -	fprintf(f, "ldb_credit_pool_id=%u, num_credits=%u\n",
> -		handle->cfg.ldb_credit_pool_id, handle->cfg.num_ldb_credits);
> -
> -	fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
> -		handle->cfg.resources.num_atomic_inflights,
> -		handle->cfg.resources.num_hist_list_entries);
> -
> -	fprintf(f, "results from most recent hw resource query:\n");
> -
> -	fprintf(f, "\tnum_sched_domains = %u\n",
> -		dlb->hw_rsrc_query_results.num_sched_domains);
> -
> -	fprintf(f, "\tnum_ldb_queues = %u\n",
> -		dlb->hw_rsrc_query_results.num_ldb_queues);
> -
> -	fprintf(f, "\tnum_ldb_ports = %u\n",
> -		dlb->hw_rsrc_query_results.num_ldb_ports);
> -
> -	fprintf(f, "\tnum_dir_ports = %u\n",
> -		dlb->hw_rsrc_query_results.num_dir_ports);
> -
> -	fprintf(f, "\tnum_atomic_inflights = %u\n",
> -		dlb->hw_rsrc_query_results.num_atomic_inflights);
> -
> -	fprintf(f, "\tmax_contiguous_atomic_inflights = %u\n",
> -		dlb->hw_rsrc_query_results.max_contiguous_atomic_inflights);
> -
> -	fprintf(f, "\tnum_hist_list_entries = %u\n",
> -		dlb->hw_rsrc_query_results.num_hist_list_entries);
> -
> -	fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
> -		dlb->hw_rsrc_query_results.max_contiguous_hist_list_entries);
> -
> -	fprintf(f, "\tnum_ldb_credits = %u\n",
> -		dlb->hw_rsrc_query_results.num_ldb_credits);
> -
> -	fprintf(f, "\tmax_contiguous_ldb_credits = %u\n",
> -		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits);
> -
> -	fprintf(f, "\tnum_dir_credits = %u\n",
> -		dlb->hw_rsrc_query_results.num_dir_credits);
> -
> -	fprintf(f, "\tmax_contiguous_dir_credits = %u\n",
> -		dlb->hw_rsrc_query_results.max_contiguous_dir_credits);
> -
> -	fprintf(f, "\tnum_ldb_credit_pools = %u\n",
> -		dlb->hw_rsrc_query_results.num_ldb_credit_pools);
> -
> -	fprintf(f, "\tnum_dir_credit_pools = %u\n",
> -		dlb->hw_rsrc_query_results.num_dir_credit_pools);
> -
> -	/* Port level information */
> -
> -	for (i = 0; i < dlb->num_ports; i++) {
> -		struct dlb_eventdev_port *p = &dlb->ev_ports[i];
> -		int j;
> -
> -		if (!p->enq_configured)
> -			fprintf(f, "Port_%d is not configured\n", i);
> -
> -		fprintf(f, "Port_%d\n", i);
> -		fprintf(f, "=======\n");
> -
> -		fprintf(f, "\tevport_%u is configured, setup done=%d\n",
> -			p->id, p->setup_done);
> -
> -		fprintf(f, "\tconfig state=%d, port state=%d\n",
> -			p->qm_port.config_state, p->qm_port.state);
> -
> -		fprintf(f, "\tport is %s\n",
> -			p->qm_port.is_directed ? "directed" : "load balanced");
> -
> -		fprintf(f, "\toutstanding releases=%u\n",
> -			p->outstanding_releases);
> -
> -		fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
> -			p->inflight_max, p->inflight_credits);
> -
> -		fprintf(f, "\tcredit update quanta=%u, implicit release =%u\n",
> -			p->credit_update_quanta, p->implicit_release);
> -
> -		fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);
> -
> -		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
> -			if (p->link[j].valid)
> -				fprintf(f, "id=%u prio=%u ",
> -					p->link[j].queue_id,
> -					p->link[j].priority);
> -		}
> -		fprintf(f, "\n");
> -
> -		fprintf(f, "\thardware port id=%u\n", p->qm_port.id);
> -
> -		fprintf(f, "\tcached_ldb_credits=%u\n",
> -			p->qm_port.cached_ldb_credits);
> -
> -		fprintf(f, "\tldb_pushcount_at_credit_expiry = %u\n",
> -			p->qm_port.ldb_pushcount_at_credit_expiry);
> -
> -		fprintf(f, "\tldb_credits = %u\n",
> -			p->qm_port.ldb_credits);
> -
> -		fprintf(f, "\tcached_dir_credits = %u\n",
> -			p->qm_port.cached_dir_credits);
> -
> -		fprintf(f, "\tdir_pushcount_at_credit_expiry=%u\n",
> -			p->qm_port.dir_pushcount_at_credit_expiry);
> -
> -		fprintf(f, "\tdir_credits = %u\n",
> -			p->qm_port.dir_credits);
> -
> -		fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
> -			p->qm_port.gen_bit,
> -			p->qm_port.cq_idx,
> -			p->qm_port.cq_depth);
> -
> -		fprintf(f, "\tuse reserved token scheme=%d, cq_rsvd_token_deficit=%u\n",
> -			p->qm_port.use_rsvd_token_scheme,
> -			p->qm_port.cq_rsvd_token_deficit);
> -
> -		fprintf(f, "\tinterrupt armed=%d\n",
> -			p->qm_port.int_armed);
> -
> -		fprintf(f, "\tPort statistics\n");
> -
> -		fprintf(f, "\t\trx_ok %" PRIu64 "\n",
> -			p->stats.traffic.rx_ok);
> -
> -		fprintf(f, "\t\trx_drop %" PRIu64 "\n",
> -			p->stats.traffic.rx_drop);
> -
> -		fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
> -			p->stats.traffic.rx_interrupt_wait);
> -
> -		fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
> -			p->stats.traffic.rx_umonitor_umwait);
> -
> -		fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
> -			p->stats.traffic.tx_ok);
> -
> -		fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
> -			p->stats.traffic.total_polls);
> -
> -		fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
> -			p->stats.traffic.zero_polls);
> -
> -		fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
> -			p->stats.traffic.tx_nospc_ldb_hw_credits);
> -
> -		fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
> -			p->stats.traffic.tx_nospc_dir_hw_credits);
> -
> -		fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
> -			p->stats.traffic.tx_nospc_inflight_max);
> -
> -		fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
> -			p->stats.traffic.tx_nospc_new_event_limit);
> -
> -		fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
> -			p->stats.traffic.tx_nospc_inflight_credits);
> -
> -		fprintf(f, "\t\ttx_new %" PRIu64 "\n",
> -			p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);
> -
> -		fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
> -			p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);
> -
> -		fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
> -			p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);
> -
> -		fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
> -			p->stats.tx_implicit_rel);
> -
> -		fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
> -			p->stats.tx_sched_cnt[DLB_SCHED_ORDERED]);
> -
> -		fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
> -			p->stats.tx_sched_cnt[DLB_SCHED_UNORDERED]);
> -
> -		fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
> -			p->stats.tx_sched_cnt[DLB_SCHED_ATOMIC]);
> -
> -		fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
> -			p->stats.tx_sched_cnt[DLB_SCHED_DIRECTED]);
> -
> -		fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
> -			p->stats.tx_invalid);
> -
> -		fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
> -			p->stats.rx_sched_cnt[DLB_SCHED_ORDERED]);
> -
> -		fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
> -			p->stats.rx_sched_cnt[DLB_SCHED_UNORDERED]);
> -
> -		fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
> -			p->stats.rx_sched_cnt[DLB_SCHED_ATOMIC]);
> -
> -		fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
> -			p->stats.rx_sched_cnt[DLB_SCHED_DIRECTED]);
> -
> -		fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
> -			p->stats.rx_sched_invalid);
> -	}
> -
> -	/* Queue level information */
> -
> -	for (i = 0; i < dlb->num_queues; i++) {
> -		struct dlb_eventdev_queue *q = &dlb->ev_queues[i];
> -		int j, k;
> -
> -		if (!q->setup_done)
> -			fprintf(f, "Queue_%d is not configured\n", i);
> -
> -		fprintf(f, "Queue_%d\n", i);
> -		fprintf(f, "========\n");
> -
> -		fprintf(f, "\tevqueue_%u is set up\n", q->id);
> -
> -		fprintf(f, "\tqueue is %s\n",
> -			q->qm_queue.is_directed ? "directed" : "load balanced");
> -
> -		fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);
> -
> -		for (j = 0; j < dlb->num_ports; j++) {
> -			struct dlb_eventdev_port *p = &dlb->ev_ports[j];
> -
> -			for (k = 0; k < DLB_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
> -				if (p->link[k].valid &&
> -				    p->link[k].queue_id == q->id)
> -					fprintf(f, "id=%u prio=%u ",
> -						p->id, p->link[k].priority);
> -			}
> -		}
> -		fprintf(f, "\n");
> -
> -		 fprintf(f, "\tcurrent depth: %u events\n",
> -			 dlb_get_queue_depth(dlb, q));
> -
> -		fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
> -			q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
> -	}
> -}
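Applications read these counters through the generic eventdev xstats API
rather than any DLB-specific interface, so removing the PMD does not change
application code, only which driver can back it. A rough sketch of the usual
lookup sequence, with an arbitrary 128-entry cap chosen for brevity:

	#include <inttypes.h>
	#include <stdio.h>
	#include <rte_eventdev.h>

	static void dump_dev_xstats(uint8_t dev_id)
	{
		struct rte_event_dev_xstats_name names[128];
		unsigned int ids[128];
		uint64_t values[128];
		int n, i;

		n = rte_event_dev_xstats_names_get(dev_id,
				RTE_EVENT_DEV_XSTATS_DEVICE, 0,
				names, ids, 128);
		if (n <= 0 || n > 128)
			return;

		if (rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
					     0, ids, values, n) != n)
			return;

		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
	}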
> diff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build
> deleted file mode 100644
> index bc158d2e0..000000000
> --- a/drivers/event/dlb/meson.build
> +++ /dev/null
> @@ -1,22 +0,0 @@
> -# SPDX-License-Identifier: BSD-3-Clause
> -# Copyright(c) 2019-2020 Intel Corporation
> -
> -if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
> -        build = false
> -        reason = 'only supported on x86_64 Linux'
> -        subdir_done()
> -endif
> -
> -sources = files('dlb.c',
> -		'dlb_iface.c',
> -		'dlb_xstats.c',
> -		'pf/dlb_main.c',
> -		'pf/dlb_pf.c',
> -		'pf/base/dlb_resource.c',
> -		'rte_pmd_dlb.c',
> -		'dlb_selftest.c'
> -)
> -
> -headers = files('rte_pmd_dlb.h')
> -
> -deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
> diff --git a/drivers/event/dlb/pf/base/dlb_hw_types.h b/drivers/event/dlb/pf/base/dlb_hw_types.h
> deleted file mode 100644
> index 4c40e2125..000000000
> --- a/drivers/event/dlb/pf/base/dlb_hw_types.h
> +++ /dev/null
> @@ -1,334 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_HW_TYPES_H
> -#define __DLB_HW_TYPES_H
> -
> -#include "../../dlb_user.h"
> -#include "dlb_osdep_types.h"
> -#include "dlb_osdep_list.h"
> -
> -#define DLB_MAX_NUM_DOMAINS 32
> -#define DLB_MAX_NUM_LDB_QUEUES 128
> -#define DLB_MAX_NUM_LDB_PORTS 64
> -#define DLB_MAX_NUM_DIR_PORTS 128
> -#define DLB_MAX_NUM_LDB_CREDITS 16384
> -#define DLB_MAX_NUM_DIR_CREDITS 4096
> -#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
> -#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
> -#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
> -#define DLB_MAX_NUM_AQOS_ENTRIES 2048
> -#define DLB_MAX_NUM_TOTAL_OUTSTANDING_COMPLETIONS 4096
> -#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
> -#define DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS 4
> -#define DLB_MAX_NUM_SEQUENCE_NUMBER_MODES 6
> -#define DLB_QID_PRIORITIES 8
> -#define DLB_NUM_ARB_WEIGHTS 8
> -#define DLB_MAX_WEIGHT 255
> -#define DLB_MAX_PORT_CREDIT_QUANTUM 1023
> -#define DLB_MAX_CQ_COMP_CHECK_LOOPS 409600
> -#define DLB_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
> -#define DLB_HZ 800000000
> -
> -/* Used for DLB A-stepping workaround for hardware write buffer lock up issue */
> -#define DLB_A_STEP_MAX_PORTS 128
> -
> -#define DLB_PF_DEV_ID 0x270B
> -
> -/* Interrupt related macros */
> -#define DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS 8
> -#define DLB_PF_NUM_CQ_INTERRUPT_VECTORS	 64
> -#define DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS \
> -	(DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + \
> -	 DLB_PF_NUM_CQ_INTERRUPT_VECTORS)
> -#define DLB_PF_NUM_COMPRESSED_MODE_VECTORS \
> -	(DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + 1)
> -#define DLB_PF_NUM_PACKED_MODE_VECTORS	 DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS
> -#define DLB_PF_COMPRESSED_MODE_CQ_VECTOR_ID DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS
> -
> -#define DLB_PF_NUM_ALARM_INTERRUPT_VECTORS 4
> -#define DLB_INT_ALARM 0
> -#define DLB_INT_INGRESS_ERROR 3
> -
> -#define DLB_ALARM_HW_SOURCE_SYS 0
> -#define DLB_ALARM_HW_SOURCE_DLB 1
> -
> -#define DLB_ALARM_HW_UNIT_CHP 1
> -#define DLB_ALARM_HW_UNIT_LSP 3
> -
> -#define DLB_ALARM_HW_CHP_AID_OUT_OF_CREDITS 6
> -#define DLB_ALARM_HW_CHP_AID_ILLEGAL_ENQ 7
> -#define DLB_ALARM_HW_LSP_AID_EXCESS_TOKEN_POPS 15
> -#define DLB_ALARM_SYS_AID_ILLEGAL_HCW 0
> -#define DLB_ALARM_SYS_AID_ILLEGAL_QID 3
> -#define DLB_ALARM_SYS_AID_DISABLED_QID 4
> -#define DLB_ALARM_SYS_AID_ILLEGAL_CQID 6
> -
> -/* Hardware-defined base addresses */
> -#define DLB_LDB_PP_BASE 0x2100000
> -#define DLB_LDB_PP_STRIDE 0x1000
> -#define DLB_LDB_PP_BOUND \
> -	(DLB_LDB_PP_BASE + DLB_LDB_PP_STRIDE * DLB_MAX_NUM_LDB_PORTS)
> -#define DLB_DIR_PP_BASE 0x2000000
> -#define DLB_DIR_PP_STRIDE 0x1000
> -#define DLB_DIR_PP_BOUND \
> -	(DLB_DIR_PP_BASE + DLB_DIR_PP_STRIDE * DLB_MAX_NUM_DIR_PORTS)
> -
> -struct dlb_freelist {
> -	u32 base;
> -	u32 bound;
> -	u32 offset;
> -};
> -
> -static inline u32 dlb_freelist_count(struct dlb_freelist *list)
> -{
> -	return (list->bound - list->base) - list->offset;
> -}
> -
> -struct dlb_hcw {
> -	u64 data;
> -	/* Word 3 */
> -	u16 opaque;
> -	u8 qid;
> -	u8 sched_type:2;
> -	u8 priority:3;
> -	u8 msg_type:3;
> -	/* Word 4 */
> -	u16 lock_id;
> -	u8 meas_lat:1;
> -	u8 rsvd1:2;
> -	u8 no_dec:1;
> -	u8 cmp_id:4;
> -	u8 cq_token:1;
> -	u8 qe_comp:1;
> -	u8 qe_frag:1;
> -	u8 qe_valid:1;
> -	u8 int_arm:1;
> -	u8 error:1;
> -	u8 rsvd:2;
> -};
> -
> -struct dlb_ldb_queue {
> -	struct dlb_list_entry domain_list;
> -	struct dlb_list_entry func_list;
> -	u32 id;
> -	u32 domain_id;
> -	u32 num_qid_inflights;
> -	struct dlb_freelist aqed_freelist;
> -	u8 sn_cfg_valid;
> -	u32 sn_group;
> -	u32 sn_slot;
> -	u32 num_mappings;
> -	u8 num_pending_additions;
> -	u8 owned;
> -	u8 configured;
> -};
> -
> -/* Directed ports and queues are paired by nature, so the driver tracks them
> - * with a single data structure.
> - */
> -struct dlb_dir_pq_pair {
> -	struct dlb_list_entry domain_list;
> -	struct dlb_list_entry func_list;
> -	u32 id;
> -	u32 domain_id;
> -	u8 ldb_pool_used;
> -	u8 dir_pool_used;
> -	u8 queue_configured;
> -	u8 port_configured;
> -	u8 owned;
> -	u8 enabled;
> -	u32 ref_cnt;
> -};
> -
> -enum dlb_qid_map_state {
> -	/* The slot doesn't contain a valid queue mapping */
> -	DLB_QUEUE_UNMAPPED,
> -	/* The slot contains a valid queue mapping */
> -	DLB_QUEUE_MAPPED,
> -	/* The driver is mapping a queue into this slot */
> -	DLB_QUEUE_MAP_IN_PROGRESS,
> -	/* The driver is unmapping a queue from this slot */
> -	DLB_QUEUE_UNMAP_IN_PROGRESS,
> -	/* The driver is unmapping a queue from this slot, and once complete
> -	 * will replace it with another mapping.
> -	 */
> -	DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP,
> -};
> -
> -struct dlb_ldb_port_qid_map {
> -	u16 qid;
> -	u8 priority;
> -	u16 pending_qid;
> -	u8 pending_priority;
> -	enum dlb_qid_map_state state;
> -};
> -
> -struct dlb_ldb_port {
> -	struct dlb_list_entry domain_list;
> -	struct dlb_list_entry func_list;
> -	u32 id;
> -	u32 domain_id;
> -	u8 ldb_pool_used;
> -	u8 dir_pool_used;
> -	u8 init_tkn_cnt;
> -	u32 hist_list_entry_base;
> -	u32 hist_list_entry_limit;
> -	/* The qid_map represents the hardware QID mapping state. */
> -	struct dlb_ldb_port_qid_map qid_map[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
> -	u32 ref_cnt;
> -	u8 num_pending_removals;
> -	u8 num_mappings;
> -	u8 owned;
> -	u8 enabled;
> -	u8 configured;
> -};
> -
> -struct dlb_credit_pool {
> -	struct dlb_list_entry domain_list;
> -	struct dlb_list_entry func_list;
> -	u32 id;
> -	u32 domain_id;
> -	u32 total_credits;
> -	u32 avail_credits;
> -	u8 owned;
> -	u8 configured;
> -};
> -
> -struct dlb_sn_group {
> -	u32 mode;
> -	u32 sequence_numbers_per_queue;
> -	u32 slot_use_bitmap;
> -	u32 id;
> -};
> -
> -static inline bool dlb_sn_group_full(struct dlb_sn_group *group)
> -{
> -	u32 mask[6] = {
> -		0xffffffff,  /* 32 SNs per queue */
> -		0x0000ffff,  /* 64 SNs per queue */
> -		0x000000ff,  /* 128 SNs per queue */
> -		0x0000000f,  /* 256 SNs per queue */
> -		0x00000003,  /* 512 SNs per queue */
> -		0x00000001}; /* 1024 SNs per queue */
> -
> -	return group->slot_use_bitmap == mask[group->mode];
> -}
> -
> -static inline int dlb_sn_group_alloc_slot(struct dlb_sn_group *group)
> -{
> -	int bound[6] = {32, 16, 8, 4, 2, 1};
> -	int i;
> -
> -	for (i = 0; i < bound[group->mode]; i++) {
> -		if (!(group->slot_use_bitmap & (1 << i))) {
> -			group->slot_use_bitmap |= 1 << i;
> -			return i;
> -		}
> -	}
> -
> -	return -1;
> -}
> -
> -static inline void dlb_sn_group_free_slot(struct dlb_sn_group *group, int slot)
> -{
> -	group->slot_use_bitmap &= ~(1 << slot);
> -}
> -
> -static inline int dlb_sn_group_used_slots(struct dlb_sn_group *group)
> -{
> -	int i, cnt = 0;
> -
> -	for (i = 0; i < 32; i++)
> -		cnt += !!(group->slot_use_bitmap & (1 << i));
> -
> -	return cnt;
> -}
> -
> -struct dlb_domain {
> -	struct dlb_function_resources *parent_func;
> -	struct dlb_list_entry func_list;
> -	struct dlb_list_head used_ldb_queues;
> -	struct dlb_list_head used_ldb_ports;
> -	struct dlb_list_head used_dir_pq_pairs;
> -	struct dlb_list_head used_ldb_credit_pools;
> -	struct dlb_list_head used_dir_credit_pools;
> -	struct dlb_list_head avail_ldb_queues;
> -	struct dlb_list_head avail_ldb_ports;
> -	struct dlb_list_head avail_dir_pq_pairs;
> -	struct dlb_list_head avail_ldb_credit_pools;
> -	struct dlb_list_head avail_dir_credit_pools;
> -	u32 total_hist_list_entries;
> -	u32 avail_hist_list_entries;
> -	u32 hist_list_entry_base;
> -	u32 hist_list_entry_offset;
> -	struct dlb_freelist qed_freelist;
> -	struct dlb_freelist dqed_freelist;
> -	struct dlb_freelist aqed_freelist;
> -	u32 id;
> -	int num_pending_removals;
> -	int num_pending_additions;
> -	u8 configured;
> -	u8 started;
> -};
> -
> -struct dlb_bitmap;
> -
> -struct dlb_function_resources {
> -	u32 num_avail_domains;
> -	struct dlb_list_head avail_domains;
> -	struct dlb_list_head used_domains;
> -	u32 num_avail_ldb_queues;
> -	struct dlb_list_head avail_ldb_queues;
> -	u32 num_avail_ldb_ports;
> -	struct dlb_list_head avail_ldb_ports;
> -	u32 num_avail_dir_pq_pairs;
> -	struct dlb_list_head avail_dir_pq_pairs;
> -	struct dlb_bitmap *avail_hist_list_entries;
> -	struct dlb_bitmap *avail_qed_freelist_entries;
> -	struct dlb_bitmap *avail_dqed_freelist_entries;
> -	struct dlb_bitmap *avail_aqed_freelist_entries;
> -	u32 num_avail_ldb_credit_pools;
> -	struct dlb_list_head avail_ldb_credit_pools;
> -	u32 num_avail_dir_credit_pools;
> -	struct dlb_list_head avail_dir_credit_pools;
> -	u32 num_enabled_ldb_ports;
> -};
> -
> -/* After initialization, each resource in dlb_hw_resources is located in one of
> - * the following lists:
> - * -- The PF's available resources list. These are unconfigured resources owned
> - *	by the PF and not allocated to a DLB scheduling domain.
> - * -- A domain's available resources list. These are domain-owned unconfigured
> - *	resources.
> - * -- A domain's used resources list. These are domain-owned configured
> - *	resources.
> - *
> - * A resource moves to a new list when a domain is created or destroyed, or
> - * when the resource is configured.
> - */
> -struct dlb_hw_resources {
> -	struct dlb_ldb_queue ldb_queues[DLB_MAX_NUM_LDB_QUEUES];
> -	struct dlb_ldb_port ldb_ports[DLB_MAX_NUM_LDB_PORTS];
> -	struct dlb_dir_pq_pair dir_pq_pairs[DLB_MAX_NUM_DIR_PORTS];
> -	struct dlb_credit_pool ldb_credit_pools[DLB_MAX_NUM_LDB_CREDIT_POOLS];
> -	struct dlb_credit_pool dir_credit_pools[DLB_MAX_NUM_DIR_CREDIT_POOLS];
> -	struct dlb_sn_group sn_groups[DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
> -};
> -
> -struct dlb_hw {
> -	/* BAR 0 address */
> -	void  *csr_kva;
> -	unsigned long csr_phys_addr;
> -	/* BAR 2 address */
> -	void  *func_kva;
> -	unsigned long func_phys_addr;
> -
> -	/* Resource tracking */
> -	struct dlb_hw_resources rsrcs;
> -	struct dlb_function_resources pf;
> -	struct dlb_domain domains[DLB_MAX_NUM_DOMAINS];
> -};
> -
> -#endif /* __DLB_HW_TYPES_H */
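For anyone cross-referencing the removed resource-tracking types: the sequence-number
group state above is managed entirely through slot_use_bitmap and the three inline
helpers. A minimal sketch of how a slot would have been claimed (hypothetical function
name, not part of the driver):

	static int take_sn_slot(struct dlb_sn_group *grp)
	{
		if (dlb_sn_group_full(grp))
			return -1;	/* every slot for this group's mode is in use */

		/* Sets the chosen bit in grp->slot_use_bitmap and returns its index */
		return dlb_sn_group_alloc_slot(grp);
	}

The queue configuration path stored the group id and returned slot in dlb_ldb_queue's
sn_group/sn_slot fields above, so dlb_sn_group_free_slot() could release the slot on
teardown.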
> diff --git a/drivers/event/dlb/pf/base/dlb_osdep.h b/drivers/event/dlb/pf/base/dlb_osdep.h
> deleted file mode 100644
> index 0c119b759..000000000
> --- a/drivers/event/dlb/pf/base/dlb_osdep.h
> +++ /dev/null
> @@ -1,310 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_OSDEP_H__
> -#define __DLB_OSDEP_H__
> -
> -#include <string.h>
> -#include <time.h>
> -#include <unistd.h>
> -#include <cpuid.h>
> -#include <pthread.h>
> -#include <rte_string_fns.h>
> -#include <rte_cycles.h>
> -#include <rte_io.h>
> -#include <rte_log.h>
> -#include <rte_spinlock.h>
> -#include "../dlb_main.h"
> -#include "dlb_resource.h"
> -#include "../../dlb_log.h"
> -#include "../../dlb_user.h"
> -
> -
> -#define DLB_PCI_REG_READ(reg)        rte_read32((void *)reg)
> -#define DLB_PCI_REG_WRITE(reg, val)   rte_write32(val, (void *)reg)
> -
> -#define DLB_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
> -#define DLB_CSR_RD(hw, reg) \
> -	DLB_PCI_REG_READ(DLB_CSR_REG_ADDR((hw), (reg)))
> -#define DLB_CSR_WR(hw, reg, val) \
> -	DLB_PCI_REG_WRITE(DLB_CSR_REG_ADDR((hw), (reg)), (val))
> -
> -#define DLB_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
> -#define DLB_FUNC_RD(hw, reg) \
> -	DLB_PCI_REG_READ(DLB_FUNC_REG_ADDR((hw), (reg)))
> -#define DLB_FUNC_WR(hw, reg, val) \
> -	DLB_PCI_REG_WRITE(DLB_FUNC_REG_ADDR((hw), (reg)), (val))
> -
> -extern unsigned int dlb_unregister_timeout_s;
> -/**
> - * os_queue_unregister_timeout_s() - timeout (in seconds) to wait for queue
> - *                                   unregister acknowledgments.
> - */
> -static inline unsigned int os_queue_unregister_timeout_s(void)
> -{
> -	return dlb_unregister_timeout_s;
> -}
> -
> -static inline size_t os_strlcpy(char *dst, const char *src, size_t sz)
> -{
> -	return rte_strlcpy(dst, src, sz);
> -}
> -
> -/**
> - * os_udelay() - busy-wait for a number of microseconds
> - * @usecs: delay duration.
> - */
> -static inline void os_udelay(int usecs)
> -{
> -	rte_delay_us(usecs);
> -}
> -
> -/**
> - * os_msleep() - sleep for a number of milliseconds
> - * @msecs: delay duration.
> - */
> -
> -static inline void os_msleep(int msecs)
> -{
> -	rte_delay_ms(msecs);
> -}
> -
> -#define DLB_PP_BASE(__is_ldb) ((__is_ldb) ? DLB_LDB_PP_BASE : DLB_DIR_PP_BASE)
> -/**
> - * os_map_producer_port() - map a producer port into the caller's address space
> - * @hw: dlb_hw handle for a particular device.
> - * @port_id: port ID
> - * @is_ldb: true for load-balanced port, false for a directed port
> - *
> - * This function maps the requested producer port memory into the caller's
> - * address space.
> - *
> - * Return:
> - * Returns the base address at which the PP memory was mapped, else NULL.
> - */
> -static inline void *os_map_producer_port(struct dlb_hw *hw,
> -					 u8 port_id,
> -					 bool is_ldb)
> -{
> -	uint64_t addr;
> -	uint64_t pp_dma_base;
> -
> -
> -	pp_dma_base = (uintptr_t)hw->func_kva + DLB_PP_BASE(is_ldb);
> -	addr = (pp_dma_base + (PAGE_SIZE * port_id));
> -
> -	return (void *)(uintptr_t)addr;
> -
> -}
> -/**
> - * os_unmap_producer_port() - unmap a producer port
> - * @addr: mapped producer port address
> - *
> - * This function undoes os_map_producer_port() by unmapping the producer port
> - * memory from the caller's address space.
> - *
> - * Return:
> - * None.
> - */
> -
> -/* PFPMD - Nothing to do here, since memory was not actually mapped by us */
> -static inline void os_unmap_producer_port(struct dlb_hw *hw, void *addr)
> -{
> -	RTE_SET_USED(hw);
> -	RTE_SET_USED(addr);
> -}
> -
> -/**
> - * os_fence_hcw() - fence an HCW to ensure it arrives at the device
> - * @hw: dlb_hw handle for a particular device.
> - * @pp_addr: producer port address
> - */
> -static inline void os_fence_hcw(struct dlb_hw *hw, u64 *pp_addr)
> -{
> -	RTE_SET_USED(hw);
> -
> -	/* To ensure outstanding HCWs reach the device, read the PP address. IA
> -	 * memory ordering prevents reads from passing older writes, and the
> -	 * mfence also ensures this.
> -	 */
> -	rte_mb();
> -
> -	*(volatile u64 *)pp_addr;
> -}
> -
> -/* Map to PMDs logging interface */
> -#define DLB_ERR(dev, fmt, args...) \
> -	DLB_LOG_ERR(fmt, ## args)
> -
> -#define DLB_INFO(dev, fmt, args...) \
> -	DLB_LOG_INFO(fmt, ## args)
> -
> -#define DLB_DEBUG(dev, fmt, args...) \
> -	DLB_LOG_DEBUG(fmt, ## args)
> -
> -/**
> - * DLB_HW_ERR() - log an error message
> - * @dlb: dlb_hw handle for a particular device.
> - * @...: variable string args.
> - */
> -#define DLB_HW_ERR(dlb, ...) do {	\
> -	RTE_SET_USED(dlb);		\
> -	DLB_ERR(dlb, __VA_ARGS__);	\
> -} while (0)
> -
> -/**
> - * DLB_HW_INFO() - log an info message
> - * @dlb: dlb_hw handle for a particular device.
> - * @...: variable string args.
> - */
> -#define DLB_HW_INFO(dlb, ...) do {	\
> -	RTE_SET_USED(dlb);		\
> -	DLB_INFO(dlb, __VA_ARGS__);	\
> -} while (0)
> -
> -/*** scheduling functions ***/
> -
> -/* The callback runs until it completes all outstanding QID->CQ
> - * map and unmap requests. To prevent deadlock, this function gives other
> - * threads a chance to grab the resource mutex and configure hardware.
> - */
> -static void *dlb_complete_queue_map_unmap(void *__args)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)__args;
> -	int ret;
> -
> -	while (1) {
> -		rte_spinlock_lock(&dlb_dev->resource_mutex);
> -
> -		ret = dlb_finish_unmap_qid_procedures(&dlb_dev->hw);
> -		ret += dlb_finish_map_qid_procedures(&dlb_dev->hw);
> -
> -		if (ret != 0) {
> -			rte_spinlock_unlock(&dlb_dev->resource_mutex);
> -			/* Relinquish the CPU so the application can process
> -			 * its CQs, so this function does not deadlock.
> -			 */
> -			sched_yield();
> -		} else
> -			break;
> -	}
> -
> -	dlb_dev->worker_launched = false;
> -
> -	rte_spinlock_unlock(&dlb_dev->resource_mutex);
> -
> -	return NULL;
> -}
> -
> -
> -/**
> - * os_schedule_work() - launch a thread to process pending map and unmap work
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function launches a thread that will run until all pending
> - * map and unmap procedures are complete.
> - */
> -static inline void os_schedule_work(struct dlb_hw *hw)
> -{
> -	struct dlb_dev *dlb_dev;
> -	pthread_t complete_queue_map_unmap_thread;
> -	int ret;
> -
> -	dlb_dev = container_of(hw, struct dlb_dev, hw);
> -
> -	ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
> -				     "dlb_queue_unmap_waiter",
> -				     NULL,
> -				     dlb_complete_queue_map_unmap,
> -				     dlb_dev);
> -	if (ret)
> -		DLB_ERR(dlb_dev,
> -		"Could not create queue complete map/unmap thread, err=%d\n",
> -			  ret);
> -	else
> -		dlb_dev->worker_launched = true;
> -}
> -
> -/**
> - * os_worker_active() - query whether the map/unmap worker thread is active
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function returns a boolean indicating whether a thread (launched by
> - * os_schedule_work()) is active. This function is used to determine
> - * whether or not to launch a worker thread.
> - */
> -static inline bool os_worker_active(struct dlb_hw *hw)
> -{
> -	struct dlb_dev *dlb_dev;
> -
> -	dlb_dev = container_of(hw, struct dlb_dev, hw);
> -
> -	return dlb_dev->worker_launched;
> -}
> -
> -/**
> - * os_notify_user_space() - notify user space
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: ID of domain to notify.
> - * @alert_id: alert ID.
> - * @aux_alert_data: additional alert data.
> - *
> - * This function notifies user space of an alert (such as a remote queue
> - * unregister or hardware alarm).
> - *
> - * Return:
> - * Returns 0 upon success, <0 otherwise.
> - */
> -static inline int os_notify_user_space(struct dlb_hw *hw,
> -				       u32 domain_id,
> -				       u64 alert_id,
> -				       u64 aux_alert_data)
> -{
> -	RTE_SET_USED(hw);
> -	RTE_SET_USED(domain_id);
> -	RTE_SET_USED(alert_id);
> -	RTE_SET_USED(aux_alert_data);
> -
> -	/* Not called for PF PMD */
> -	return -1;
> -}
> -
> -enum dlb_dev_revision {
> -	DLB_A0,
> -	DLB_A1,
> -	DLB_A2,
> -	DLB_A3,
> -	DLB_B0,
> -};
> -
> -/**
> - * os_get_dev_revision() - query the device_revision
> - * @hw: dlb_hw handle for a particular device.
> - */
> -static inline enum dlb_dev_revision os_get_dev_revision(struct dlb_hw *hw)
> -{
> -	uint32_t a, b, c, d, stepping;
> -
> -	RTE_SET_USED(hw);
> -
> -	__cpuid(0x1, a, b, c, d);
> -
> -	stepping = a & 0xf;
> -
> -	switch (stepping) {
> -	case 0:
> -		return DLB_A0;
> -	case 1:
> -		return DLB_A1;
> -	case 2:
> -		return DLB_A2;
> -	case 3:
> -		return DLB_A3;
> -	default:
> -		/* Treat all revisions >= 4 as B0 */
> -		return DLB_B0;
> -	}
> -}
> -
> -#endif /*  __DLB_OSDEP_H__ */
> diff --git a/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h b/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
> deleted file mode 100644
> index 4c10c8c5d..000000000
> --- a/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
> +++ /dev/null
> @@ -1,441 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_OSDEP_BITMAP_H__
> -#define __DLB_OSDEP_BITMAP_H__
> -
> -#include <stdint.h>
> -#include <stdbool.h>
> -#include <stdio.h>
> -#include <unistd.h>
> -#include <rte_bitmap.h>
> -#include <rte_string_fns.h>
> -#include <rte_malloc.h>
> -#include <rte_errno.h>
> -#include "../dlb_main.h"
> -
> -/*************************/
> -/*** Bitmap operations ***/
> -/*************************/
> -struct dlb_bitmap {
> -	struct rte_bitmap *map;
> -	unsigned int len;
> -	struct dlb_hw *hw;
> -};
> -
> -/**
> - * dlb_bitmap_alloc() - alloc a bitmap data structure
> - * @bitmap: pointer to dlb_bitmap structure pointer.
> - * @len: number of entries in the bitmap.
> - *
> - * This function allocates a bitmap and initializes it with length @len. All
> - * entries are initially zero.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or len is 0.
> - * ENOMEM - could not allocate memory for the bitmap data structure.
> - */
> -static inline int dlb_bitmap_alloc(struct dlb_hw *hw,
> -				   struct dlb_bitmap **bitmap,
> -				   unsigned int len)
> -{
> -	struct dlb_bitmap *bm;
> -	void *mem;
> -	uint32_t alloc_size;
> -	uint32_t nbits = (uint32_t) len;
> -	RTE_SET_USED(hw);
> -
> -	if (bitmap == NULL || nbits == 0)
> -		return -EINVAL;
> -
> -	/* Allocate DLB bitmap control struct */
> -	bm = rte_malloc("DLB_PF",
> -		sizeof(struct dlb_bitmap),
> -		RTE_CACHE_LINE_SIZE);
> -
> -	if (bm == NULL)
> -		return -ENOMEM;
> -
> -	/* Allocate bitmap memory */
> -	alloc_size = rte_bitmap_get_memory_footprint(nbits);
> -	mem = rte_malloc("DLB_PF_BITMAP", alloc_size, RTE_CACHE_LINE_SIZE);
> -	if (mem == NULL) {
> -		rte_free(bm);
> -		return -ENOMEM;
> -	}
> -
> -	bm->map = rte_bitmap_init(len, mem, alloc_size);
> -	if (bm->map == NULL) {
> -		rte_free(mem);
> -		rte_free(bm);
> -		return -ENOMEM;
> -	}
> -
> -	bm->len = len;
> -
> -	*bitmap = bm;
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_bitmap_free() - free a previously allocated bitmap data structure
> - * @bitmap: pointer to dlb_bitmap structure.
> - *
> - * This function frees a bitmap that was allocated with dlb_bitmap_alloc().
> - */
> -static inline void dlb_bitmap_free(struct dlb_bitmap *bitmap)
> -{
> -	if (bitmap == NULL)
> -		return;
> -
> -	rte_free(bitmap->map);
> -	rte_free(bitmap);
> -}
> -
> -/**
> - * dlb_bitmap_fill() - fill a bitmap with all 1s
> - * @bitmap: pointer to dlb_bitmap structure.
> - *
> - * This function sets all bitmap values to 1.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or is uninitialized.
> - */
> -static inline int dlb_bitmap_fill(struct dlb_bitmap *bitmap)
> -{
> -	unsigned int i;
> -
> -	if (bitmap  == NULL || bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	for (i = 0; i != bitmap->len; i++)
> -		rte_bitmap_set(bitmap->map, i);
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_bitmap_zero() - fill a bitmap with all 0s
> - * @bitmap: pointer to dlb_bitmap structure.
> - *
> - * This function sets all bitmap values to 0.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or is uninitialized.
> - */
> -static inline int dlb_bitmap_zero(struct dlb_bitmap *bitmap)
> -{
> -	if (bitmap  == NULL || bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	rte_bitmap_reset(bitmap->map);
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_bitmap_set() - set a bitmap entry
> - * @bitmap: pointer to dlb_bitmap structure.
> - * @bit: bit index.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
> - *	    bitmap length.
> - */
> -static inline int dlb_bitmap_set(struct dlb_bitmap *bitmap,
> -				 unsigned int bit)
> -{
> -	if (bitmap  == NULL || bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	if (bitmap->len <= bit)
> -		return -EINVAL;
> -
> -	rte_bitmap_set(bitmap->map, bit);
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_bitmap_set_range() - set a range of bitmap entries
> - * @bitmap: pointer to dlb_bitmap structure.
> - * @bit: starting bit index.
> - * @len: length of the range.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
> - *	    length.
> - */
> -static inline int dlb_bitmap_set_range(struct dlb_bitmap *bitmap,
> -				       unsigned int bit,
> -				       unsigned int len)
> -{
> -	unsigned int i;
> -
> -	if (bitmap  == NULL || bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	if (bitmap->len <= bit)
> -		return -EINVAL;
> -
> -	for (i = 0; i != len; i++)
> -		rte_bitmap_set(bitmap->map, bit + i);
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_bitmap_clear() - clear a bitmap entry
> - * @bitmap: pointer to dlb_bitmap structure.
> - * @bit: bit index.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
> - *	    bitmap length.
> - */
> -static inline int dlb_bitmap_clear(struct dlb_bitmap *bitmap,
> -				   unsigned int bit)
> -{
> -	if (bitmap  == NULL || bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	if (bitmap->len <= bit)
> -		return -EINVAL;
> -
> -	rte_bitmap_clear(bitmap->map, bit);
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_bitmap_clear_range() - clear a range of bitmap entries
> - * @bitmap: pointer to dlb_bitmap structure.
> - * @bit: starting bit index.
> - * @len: length of the range.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
> - *	    length.
> - */
> -static inline int dlb_bitmap_clear_range(struct dlb_bitmap *bitmap,
> -					 unsigned int bit,
> -					 unsigned int len)
> -{
> -	unsigned int i;
> -
> -	if (bitmap  == NULL || bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	if (bitmap->len <= bit)
> -		return -EINVAL;
> -
> -	for (i = 0; i != len; i++)
> -		rte_bitmap_clear(bitmap->map, bit + i);
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_bitmap_find_set_bit_range() - find a range of set bits
> - * @bitmap: pointer to dlb_bitmap structure.
> - * @len: length of the range.
> - *
> - * This function looks for a range of set bits of length @len.
> - *
> - * Return:
> - * Returns the base bit index upon success, < 0 otherwise.
> - *
> - * Errors:
> - * ENOENT - unable to find a length *len* range of set bits.
> - * EINVAL - bitmap is NULL or is uninitialized, or len is invalid.
> - */
> -static inline int dlb_bitmap_find_set_bit_range(struct dlb_bitmap *bitmap,
> -						unsigned int len)
> -{
> -	unsigned int i, j = 0;
> -
> -	if (bitmap  == NULL || bitmap->map  == NULL || len == 0)
> -		return -EINVAL;
> -
> -	if (bitmap->len < len)
> -		return -ENOENT;
> -
> -	for (i = 0; i != bitmap->len; i++) {
> -		if  (rte_bitmap_get(bitmap->map, i)) {
> -			if (++j == len)
> -				return i - j + 1;
> -		} else
> -			j = 0;
> -	}
> -
> -	/* No set bit range of length len? */
> -	return -ENOENT;
> -}
> -
> -/**
> - * dlb_bitmap_find_set_bit() - find the first set bit
> - * @bitmap: pointer to dlb_bitmap structure.
> - *
> - * This function looks for a single set bit.
> - *
> - * Return:
> - * Returns the base bit index upon success, < 0 otherwise.
> - *
> - * Errors:
> - * ENOENT - the bitmap contains no set bits.
> - * EINVAL - bitmap is NULL or is uninitialized, or len is invalid.
> - */
> -static inline int dlb_bitmap_find_set_bit(struct dlb_bitmap *bitmap)
> -{
> -	unsigned int i;
> -
> -	if (bitmap == NULL)
> -		return -EINVAL;
> -
> -	if (bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	for (i = 0; i != bitmap->len; i++) {
> -		if  (rte_bitmap_get(bitmap->map, i))
> -			return i;
> -	}
> -
> -	return -ENOENT;
> -}
> -
> -/**
> - * dlb_bitmap_count() - returns the number of set bits
> - * @bitmap: pointer to dlb_bitmap structure.
> - *
> - * This function returns the number of set bits in the bitmap.
> - *
> - * Return:
> - * Returns the number of set bits upon success, <0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or is uninitialized.
> - */
> -static inline int dlb_bitmap_count(struct dlb_bitmap *bitmap)
> -{
> -	int weight = 0;
> -	unsigned int i;
> -
> -	if (bitmap == NULL)
> -		return -EINVAL;
> -
> -	if (bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	for (i = 0; i != bitmap->len; i++) {
> -		if  (rte_bitmap_get(bitmap->map, i))
> -			weight++;
> -	}
> -	return weight;
> -}
> -
> -/**
> - * dlb_bitmap_longest_set_range() - returns longest contiguous range of set bits
> - * @bitmap: pointer to dlb_bitmap structure.
> - *
> - * Return:
> - * Returns the bitmap's longest contiguous range of set bits upon success,
> - * <0 otherwise.
> - *
> - * Errors:
> - * EINVAL - bitmap is NULL or is uninitialized.
> - */
> -static inline int dlb_bitmap_longest_set_range(struct dlb_bitmap *bitmap)
> -{
> -	int max_len = 0, len = 0;
> -	unsigned int i;
> -
> -	if (bitmap == NULL)
> -		return -EINVAL;
> -
> -	if (bitmap->map == NULL)
> -		return -EINVAL;
> -
> -	for (i = 0; i != bitmap->len; i++) {
> -		if  (rte_bitmap_get(bitmap->map, i)) {
> -			len++;
> -		} else {
> -			if (len > max_len)
> -				max_len = len;
> -			len = 0;
> -		}
> -	}
> -
> -	if (len > max_len)
> -		max_len = len;
> -
> -	return max_len;
> -}
> -
> -/**
> - * dlb_bitmap_or() - store the logical 'or' of two bitmaps into a third
> - * @dest: pointer to dlb_bitmap structure, which will contain the results of
> - *	  the 'or' of src1 and src2.
> - * @src1: pointer to dlb_bitmap structure, will be 'or'ed with src2.
> - * @src2: pointer to dlb_bitmap structure, will be 'or'ed with src1.
> - *
> - * This function 'or's two bitmaps together and stores the result in a third
> - * bitmap. The source and destination bitmaps can be the same.
> - *
> - * Return:
> - * Returns the number of set bits upon success, <0 otherwise.
> - *
> - * Errors:
> - * EINVAL - One of the bitmaps is NULL or is uninitialized.
> - */
> -static inline int dlb_bitmap_or(struct dlb_bitmap *dest,
> -				struct dlb_bitmap *src1,
> -				struct dlb_bitmap *src2)
> -{
> -	unsigned int i, min;
> -	int numset = 0;
> -
> -	if (dest  == NULL || dest->map == NULL ||
> -	    src1 == NULL || src1->map == NULL ||
> -	    src2  == NULL || src2->map == NULL)
> -		return -EINVAL;
> -
> -	min = dest->len;
> -	min = (min > src1->len) ? src1->len : min;
> -	min = (min > src2->len) ? src2->len : min;
> -
> -	for (i = 0; i != min; i++) {
> -		if  (rte_bitmap_get(src1->map, i) ||
> -				rte_bitmap_get(src2->map, i)) {
> -			rte_bitmap_set(dest->map, i);
> -			numset++;
> -		} else
> -			rte_bitmap_clear(dest->map, i);
> -	}
> -
> -	return numset;
> -}
> -
> -#endif /*  __DLB_OSDEP_BITMAP_H__ */
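The wrappers above are thin shims over rte_bitmap, consumed as freelists (see the
avail_*_freelist_entries fields earlier in this patch) where a set bit marks a free
entry. A rough sketch of the allocate-a-contiguous-run pattern they support
(hypothetical helper, not from the driver):

	static int alloc_contiguous(struct dlb_bitmap *bm, unsigned int nslots)
	{
		int base = dlb_bitmap_find_set_bit_range(bm, nslots);

		if (base < 0)
			return base;	/* -ENOENT: no free run of that length */

		/* Clearing the bits marks the range as allocated */
		dlb_bitmap_clear_range(bm, base, nslots);

		return base;
	}

dlb_bitmap_set_range() reverses this when the entries are returned to the pool.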
> diff --git a/drivers/event/dlb/pf/base/dlb_osdep_list.h b/drivers/event/dlb/pf/base/dlb_osdep_list.h
> deleted file mode 100644
> index a53b3626e..000000000
> --- a/drivers/event/dlb/pf/base/dlb_osdep_list.h
> +++ /dev/null
> @@ -1,131 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_OSDEP_LIST_H__
> -#define __DLB_OSDEP_LIST_H__
> -
> -#include <rte_tailq.h>
> -
> -struct dlb_list_entry {
> -	TAILQ_ENTRY(dlb_list_entry) node;
> -};
> -
> -/* Dummy - just a struct definition */
> -TAILQ_HEAD(dlb_list_head, dlb_list_entry);
> -
> -/* =================
> - * TAILQ Supplements
> - * =================
> - */
> -
> -#ifndef TAILQ_FOREACH_ENTRY
> -#define TAILQ_FOREACH_ENTRY(ptr, head, name, iter)		\
> -	for ((iter) = TAILQ_FIRST(&head);			\
> -	    (iter)						\
> -		&& (ptr = container_of(iter, typeof(*(ptr)), name)); \
> -	    (iter) = TAILQ_NEXT((iter), node))
> -#endif
> -
> -#ifndef TAILQ_FOREACH_ENTRY_SAFE
> -#define TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, iter, tvar)	\
> -	for ((iter) = TAILQ_FIRST(&head);			\
> -	    (iter) &&						\
> -		(ptr = container_of(iter, typeof(*(ptr)), name)) &&\
> -		((tvar) = TAILQ_NEXT((iter), node), 1);	\
> -	    (iter) = (tvar))
> -#endif
> -
> -/* =========
> - * DLB Lists
> - * =========
> - */
> -
> -/**
> - * dlb_list_init_head() - initialize the head of a list
> - * @head: list head
> - */
> -static inline void dlb_list_init_head(struct dlb_list_head *head)
> -{
> -	TAILQ_INIT(head);
> -}
> -
> -/**
> - * dlb_list_add() - add an entry to a list
> - * @head: new entry will be added after this list header
> - * @entry: new list entry to be added
> - */
> -static inline void dlb_list_add(struct dlb_list_head *head,
> -				struct dlb_list_entry *entry)
> -{
> -	TAILQ_INSERT_TAIL(head, entry, node);
> -}
> -
> -/**
> - * @head: list head
> - * @entry: list entry to be deleted
> - */
> -static inline void dlb_list_del(struct dlb_list_head *head,
> -				struct dlb_list_entry *entry)
> -{
> -	TAILQ_REMOVE(head, entry, node);
> -}
> -
> -/**
> - * dlb_list_empty() - check if a list is empty
> - * @head: list head
> - *
> - * Return:
> - * Returns 1 if empty, 0 if not.
> - */
> -static inline bool dlb_list_empty(struct dlb_list_head *head)
> -{
> -	return TAILQ_EMPTY(head);
> -}
> -
> -/**
> - * dlb_list_splice() - move all entries from one list onto another
> - * @src_head: list to be spliced (left empty afterwards)
> - * @head: list onto which src_head's entries are appended
> - */
> -static inline void dlb_list_splice(struct dlb_list_head *src_head,
> -				   struct dlb_list_head *head)
> -{
> -	TAILQ_CONCAT(head, src_head, node);
> -}
> -
> -/**
> - * DLB_LIST_HEAD() - retrieve the head of the list
> - * @head: list head
> - * @type: type of the list variable
> - * @name: name of the dlb_list within the struct
> - */
> -#define DLB_LIST_HEAD(head, type, name)				\
> -	(TAILQ_FIRST(&head) ?					\
> -		container_of(TAILQ_FIRST(&head), type, name) :	\
> -		NULL)
> -
> -/**
> - * DLB_LIST_FOR_EACH() - iterate over a list
> - * @head: list head
> - * @ptr: pointer to struct containing a struct dlb_list_entry
> - * @name: name of the dlb_list_entry field within the containing struct
> - * @iter: iterator variable
> - */
> -#define DLB_LIST_FOR_EACH(head, ptr, name, tmp_iter) \
> -	TAILQ_FOREACH_ENTRY(ptr, head, name, tmp_iter)
> -
> -/**
> - * DLB_LIST_FOR_EACH_SAFE() - iterate over a list. This loop works even if
> - * an element is removed from the list while processing it.
> - * @ptr: pointer to struct containing a struct dlb_list_entry
> - * @ptr_tmp: pointer to struct containing a struct dlb_list_entry (temporary)
> - * @head: list head
> - * @name: name of the dlb_list_entry field within the containing struct
> - * @iter: iterator variable
> - * @iter_tmp: iterator variable (temporary)
> - */
> -#define DLB_LIST_FOR_EACH_SAFE(head, ptr, ptr_tmp, name, tmp_iter, saf_iter) \
> -	TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, tmp_iter, saf_iter)
> -
> -#endif /*  __DLB_OSDEP_LIST_H__ */
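The list helpers above are plain TAILQ with a container_of() step folded in, so a
consumer embeds a dlb_list_entry and iterates over the containing structs. A small
sketch of that pattern (hypothetical struct, assumes container_of() as already used
by the macros):

	struct example_queue {
		struct dlb_list_entry domain_list;	/* embedded list linkage */
		u32 id;
	};

	static u32 sum_queue_ids(struct dlb_list_head *head)
	{
		struct dlb_list_entry *iter;
		struct example_queue *q;
		u32 sum = 0;

		DLB_LIST_FOR_EACH(*head, q, domain_list, iter)
			sum += q->id;

		return sum;
	}

This is the same walk the removed dlb_resource.c performed over the per-domain
resource lists.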
> diff --git a/drivers/event/dlb/pf/base/dlb_osdep_types.h b/drivers/event/dlb/pf/base/dlb_osdep_types.h
> deleted file mode 100644
> index 2e9d7d8d0..000000000
> --- a/drivers/event/dlb/pf/base/dlb_osdep_types.h
> +++ /dev/null
> @@ -1,31 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_OSDEP_TYPES_H
> -#define __DLB_OSDEP_TYPES_H
> -
> -#include <linux/types.h>
> -
> -#include <inttypes.h>
> -#include <ctype.h>
> -#include <stdint.h>
> -#include <stdbool.h>
> -#include <string.h>
> -#include <unistd.h>
> -#include <errno.h>
> -
> -/* Types for user mode PF PMD */
> -typedef uint8_t         u8;
> -typedef int8_t          s8;
> -typedef uint16_t        u16;
> -typedef int16_t         s16;
> -typedef uint32_t        u32;
> -typedef int32_t         s32;
> -typedef uint64_t        u64;
> -
> -#define __iomem
> -
> -/* END types for user mode PF PMD */
> -
> -#endif /* __DLB_OSDEP_TYPES_H */
> diff --git a/drivers/event/dlb/pf/base/dlb_regs.h b/drivers/event/dlb/pf/base/dlb_regs.h
> deleted file mode 100644
> index a1c63f336..000000000
> --- a/drivers/event/dlb/pf/base/dlb_regs.h
> +++ /dev/null
> @@ -1,2368 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_REGS_H
> -#define __DLB_REGS_H
> -
> -#include "dlb_osdep_types.h"
> -
> -#define DLB_MSIX_MEM_VECTOR_CTRL(x) \
> -	(0x100000c + (x) * 0x10)
> -#define DLB_MSIX_MEM_VECTOR_CTRL_RST 0x1
> -union dlb_msix_mem_vector_ctrl {
> -	struct {
> -		u32 vec_mask : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_TOTAL_VAS 0x124
> -#define DLB_SYS_TOTAL_VAS_RST 0x20
> -union dlb_sys_total_vas {
> -	struct {
> -		u32 total_vas : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_ALARM_PF_SYND2 0x508
> -#define DLB_SYS_ALARM_PF_SYND2_RST 0x0
> -union dlb_sys_alarm_pf_synd2 {
> -	struct {
> -		u32 lock_id : 16;
> -		u32 meas : 1;
> -		u32 debug : 7;
> -		u32 cq_pop : 1;
> -		u32 qe_uhl : 1;
> -		u32 qe_orsp : 1;
> -		u32 qe_valid : 1;
> -		u32 cq_int_rearm : 1;
> -		u32 dsi_error : 1;
> -		u32 rsvd0 : 2;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_ALARM_PF_SYND1 0x504
> -#define DLB_SYS_ALARM_PF_SYND1_RST 0x0
> -union dlb_sys_alarm_pf_synd1 {
> -	struct {
> -		u32 dsi : 16;
> -		u32 qid : 8;
> -		u32 qtype : 2;
> -		u32 qpri : 3;
> -		u32 msg_type : 3;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_ALARM_PF_SYND0 0x500
> -#define DLB_SYS_ALARM_PF_SYND0_RST 0x0
> -union dlb_sys_alarm_pf_synd0 {
> -	struct {
> -		u32 syndrome : 8;
> -		u32 rtype : 2;
> -		u32 rsvd0 : 2;
> -		u32 from_dmv : 1;
> -		u32 is_ldb : 1;
> -		u32 cls : 2;
> -		u32 aid : 6;
> -		u32 unit : 4;
> -		u32 source : 4;
> -		u32 more : 1;
> -		u32 valid : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_VASQID_V(x) \
> -	(0xf60 + (x) * 0x1000)
> -#define DLB_SYS_LDB_VASQID_V_RST 0x0
> -union dlb_sys_ldb_vasqid_v {
> -	struct {
> -		u32 vasqid_v : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_VASQID_V(x) \
> -	(0xf68 + (x) * 0x1000)
> -#define DLB_SYS_DIR_VASQID_V_RST 0x0
> -union dlb_sys_dir_vasqid_v {
> -	struct {
> -		u32 vasqid_v : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_WBUF_DIR_FLAGS(x) \
> -	(0xf70 + (x) * 0x1000)
> -#define DLB_SYS_WBUF_DIR_FLAGS_RST 0x0
> -union dlb_sys_wbuf_dir_flags {
> -	struct {
> -		u32 wb_v : 4;
> -		u32 cl : 1;
> -		u32 busy : 1;
> -		u32 opt : 1;
> -		u32 rsvd0 : 25;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_WBUF_LDB_FLAGS(x) \
> -	(0xf78 + (x) * 0x1000)
> -#define DLB_SYS_WBUF_LDB_FLAGS_RST 0x0
> -union dlb_sys_wbuf_ldb_flags {
> -	struct {
> -		u32 wb_v : 4;
> -		u32 cl : 1;
> -		u32 busy : 1;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_QID_V(x) \
> -	(0x8000034 + (x) * 0x1000)
> -#define DLB_SYS_LDB_QID_V_RST 0x0
> -union dlb_sys_ldb_qid_v {
> -	struct {
> -		u32 qid_v : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_QID_CFG_V(x) \
> -	(0x8000030 + (x) * 0x1000)
> -#define DLB_SYS_LDB_QID_CFG_V_RST 0x0
> -union dlb_sys_ldb_qid_cfg_v {
> -	struct {
> -		u32 sn_cfg_v : 1;
> -		u32 fid_cfg_v : 1;
> -		u32 rsvd0 : 30;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_QID_V(x) \
> -	(0x8000040 + (x) * 0x1000)
> -#define DLB_SYS_DIR_QID_V_RST 0x0
> -union dlb_sys_dir_qid_v {
> -	struct {
> -		u32 qid_v : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_POOL_ENBLD(x) \
> -	(0x8000070 + (x) * 0x1000)
> -#define DLB_SYS_LDB_POOL_ENBLD_RST 0x0
> -union dlb_sys_ldb_pool_enbld {
> -	struct {
> -		u32 pool_enabled : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_POOL_ENBLD(x) \
> -	(0x8000080 + (x) * 0x1000)
> -#define DLB_SYS_DIR_POOL_ENBLD_RST 0x0
> -union dlb_sys_dir_pool_enbld {
> -	struct {
> -		u32 pool_enabled : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_PP2VPP(x) \
> -	(0x8000090 + (x) * 0x1000)
> -#define DLB_SYS_LDB_PP2VPP_RST 0x0
> -union dlb_sys_ldb_pp2vpp {
> -	struct {
> -		u32 vpp : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_PP2VPP(x) \
> -	(0x8000094 + (x) * 0x1000)
> -#define DLB_SYS_DIR_PP2VPP_RST 0x0
> -union dlb_sys_dir_pp2vpp {
> -	struct {
> -		u32 vpp : 7;
> -		u32 rsvd0 : 25;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_PP_V(x) \
> -	(0x8000128 + (x) * 0x1000)
> -#define DLB_SYS_LDB_PP_V_RST 0x0
> -union dlb_sys_ldb_pp_v {
> -	struct {
> -		u32 pp_v : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_CQ_ISR(x) \
> -	(0x8000124 + (x) * 0x1000)
> -#define DLB_SYS_LDB_CQ_ISR_RST 0x0
> -/* CQ Interrupt Modes */
> -#define DLB_CQ_ISR_MODE_DIS  0
> -#define DLB_CQ_ISR_MODE_MSI  1
> -#define DLB_CQ_ISR_MODE_MSIX 2
> -union dlb_sys_ldb_cq_isr {
> -	struct {
> -		u32 vector : 6;
> -		u32 vf : 4;
> -		u32 en_code : 2;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_CQ2VF_PF(x) \
> -	(0x8000120 + (x) * 0x1000)
> -#define DLB_SYS_LDB_CQ2VF_PF_RST 0x0
> -union dlb_sys_ldb_cq2vf_pf {
> -	struct {
> -		u32 vf : 4;
> -		u32 is_pf : 1;
> -		u32 rsvd0 : 27;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_PP2VAS(x) \
> -	(0x800011c + (x) * 0x1000)
> -#define DLB_SYS_LDB_PP2VAS_RST 0x0
> -union dlb_sys_ldb_pp2vas {
> -	struct {
> -		u32 vas : 5;
> -		u32 rsvd0 : 27;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_PP2LDBPOOL(x) \
> -	(0x8000118 + (x) * 0x1000)
> -#define DLB_SYS_LDB_PP2LDBPOOL_RST 0x0
> -union dlb_sys_ldb_pp2ldbpool {
> -	struct {
> -		u32 ldbpool : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_PP2DIRPOOL(x) \
> -	(0x8000114 + (x) * 0x1000)
> -#define DLB_SYS_LDB_PP2DIRPOOL_RST 0x0
> -union dlb_sys_ldb_pp2dirpool {
> -	struct {
> -		u32 dirpool : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_PP2VF_PF(x) \
> -	(0x8000110 + (x) * 0x1000)
> -#define DLB_SYS_LDB_PP2VF_PF_RST 0x0
> -union dlb_sys_ldb_pp2vf_pf {
> -	struct {
> -		u32 vf : 4;
> -		u32 is_pf : 1;
> -		u32 rsvd0 : 27;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_PP_ADDR_U(x) \
> -	(0x800010c + (x) * 0x1000)
> -#define DLB_SYS_LDB_PP_ADDR_U_RST 0x0
> -union dlb_sys_ldb_pp_addr_u {
> -	struct {
> -		u32 addr_u : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_PP_ADDR_L(x) \
> -	(0x8000108 + (x) * 0x1000)
> -#define DLB_SYS_LDB_PP_ADDR_L_RST 0x0
> -union dlb_sys_ldb_pp_addr_l {
> -	struct {
> -		u32 rsvd0 : 7;
> -		u32 addr_l : 25;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_CQ_ADDR_U(x) \
> -	(0x8000104 + (x) * 0x1000)
> -#define DLB_SYS_LDB_CQ_ADDR_U_RST 0x0
> -union dlb_sys_ldb_cq_addr_u {
> -	struct {
> -		u32 addr_u : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_CQ_ADDR_L(x) \
> -	(0x8000100 + (x) * 0x1000)
> -#define DLB_SYS_LDB_CQ_ADDR_L_RST 0x0
> -union dlb_sys_ldb_cq_addr_l {
> -	struct {
> -		u32 rsvd0 : 6;
> -		u32 addr_l : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_PP_V(x) \
> -	(0x8000228 + (x) * 0x1000)
> -#define DLB_SYS_DIR_PP_V_RST 0x0
> -union dlb_sys_dir_pp_v {
> -	struct {
> -		u32 pp_v : 1;
> -		u32 mb_dm : 1;
> -		u32 rsvd0 : 30;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_CQ_ISR(x) \
> -	(0x8000224 + (x) * 0x1000)
> -#define DLB_SYS_DIR_CQ_ISR_RST 0x0
> -union dlb_sys_dir_cq_isr {
> -	struct {
> -		u32 vector : 6;
> -		u32 vf : 4;
> -		u32 en_code : 2;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_CQ2VF_PF(x) \
> -	(0x8000220 + (x) * 0x1000)
> -#define DLB_SYS_DIR_CQ2VF_PF_RST 0x0
> -union dlb_sys_dir_cq2vf_pf {
> -	struct {
> -		u32 vf : 4;
> -		u32 is_pf : 1;
> -		u32 rsvd0 : 27;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_PP2VAS(x) \
> -	(0x800021c + (x) * 0x1000)
> -#define DLB_SYS_DIR_PP2VAS_RST 0x0
> -union dlb_sys_dir_pp2vas {
> -	struct {
> -		u32 vas : 5;
> -		u32 rsvd0 : 27;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_PP2LDBPOOL(x) \
> -	(0x8000218 + (x) * 0x1000)
> -#define DLB_SYS_DIR_PP2LDBPOOL_RST 0x0
> -union dlb_sys_dir_pp2ldbpool {
> -	struct {
> -		u32 ldbpool : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_PP2DIRPOOL(x) \
> -	(0x8000214 + (x) * 0x1000)
> -#define DLB_SYS_DIR_PP2DIRPOOL_RST 0x0
> -union dlb_sys_dir_pp2dirpool {
> -	struct {
> -		u32 dirpool : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_PP2VF_PF(x) \
> -	(0x8000210 + (x) * 0x1000)
> -#define DLB_SYS_DIR_PP2VF_PF_RST 0x0
> -union dlb_sys_dir_pp2vf_pf {
> -	struct {
> -		u32 vf : 4;
> -		u32 is_pf : 1;
> -		u32 is_hw_dsi : 1;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_PP_ADDR_U(x) \
> -	(0x800020c + (x) * 0x1000)
> -#define DLB_SYS_DIR_PP_ADDR_U_RST 0x0
> -union dlb_sys_dir_pp_addr_u {
> -	struct {
> -		u32 addr_u : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_PP_ADDR_L(x) \
> -	(0x8000208 + (x) * 0x1000)
> -#define DLB_SYS_DIR_PP_ADDR_L_RST 0x0
> -union dlb_sys_dir_pp_addr_l {
> -	struct {
> -		u32 rsvd0 : 7;
> -		u32 addr_l : 25;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_CQ_ADDR_U(x) \
> -	(0x8000204 + (x) * 0x1000)
> -#define DLB_SYS_DIR_CQ_ADDR_U_RST 0x0
> -union dlb_sys_dir_cq_addr_u {
> -	struct {
> -		u32 addr_u : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_CQ_ADDR_L(x) \
> -	(0x8000200 + (x) * 0x1000)
> -#define DLB_SYS_DIR_CQ_ADDR_L_RST 0x0
> -union dlb_sys_dir_cq_addr_l {
> -	struct {
> -		u32 rsvd0 : 6;
> -		u32 addr_l : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_INGRESS_ALARM_ENBL 0x300
> -#define DLB_SYS_INGRESS_ALARM_ENBL_RST 0x0
> -union dlb_sys_ingress_alarm_enbl {
> -	struct {
> -		u32 illegal_hcw : 1;
> -		u32 illegal_pp : 1;
> -		u32 disabled_pp : 1;
> -		u32 illegal_qid : 1;
> -		u32 disabled_qid : 1;
> -		u32 illegal_ldb_qid_cfg : 1;
> -		u32 illegal_cqid : 1;
> -		u32 rsvd0 : 25;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_CQ_MODE 0x30c
> -#define DLB_SYS_CQ_MODE_RST 0x0
> -union dlb_sys_cq_mode {
> -	struct {
> -		u32 ldb_cq64 : 1;
> -		u32 dir_cq64 : 1;
> -		u32 rsvd0 : 30;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_MSIX_ACK 0x400
> -#define DLB_SYS_MSIX_ACK_RST 0x0
> -union dlb_sys_msix_ack {
> -	struct {
> -		u32 msix_0_ack : 1;
> -		u32 msix_1_ack : 1;
> -		u32 msix_2_ack : 1;
> -		u32 msix_3_ack : 1;
> -		u32 msix_4_ack : 1;
> -		u32 msix_5_ack : 1;
> -		u32 msix_6_ack : 1;
> -		u32 msix_7_ack : 1;
> -		u32 msix_8_ack : 1;
> -		u32 rsvd0 : 23;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_MSIX_PASSTHRU 0x404
> -#define DLB_SYS_MSIX_PASSTHRU_RST 0x0
> -union dlb_sys_msix_passthru {
> -	struct {
> -		u32 msix_0_passthru : 1;
> -		u32 msix_1_passthru : 1;
> -		u32 msix_2_passthru : 1;
> -		u32 msix_3_passthru : 1;
> -		u32 msix_4_passthru : 1;
> -		u32 msix_5_passthru : 1;
> -		u32 msix_6_passthru : 1;
> -		u32 msix_7_passthru : 1;
> -		u32 msix_8_passthru : 1;
> -		u32 rsvd0 : 23;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_MSIX_MODE 0x408
> -#define DLB_SYS_MSIX_MODE_RST 0x0
> -/* MSI-X Modes */
> -#define DLB_MSIX_MODE_PACKED     0
> -#define DLB_MSIX_MODE_COMPRESSED 1
> -union dlb_sys_msix_mode {
> -	struct {
> -		u32 mode : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_CQ_31_0_OCC_INT_STS 0x440
> -#define DLB_SYS_DIR_CQ_31_0_OCC_INT_STS_RST 0x0
> -union dlb_sys_dir_cq_31_0_occ_int_sts {
> -	struct {
> -		u32 cq_0_occ_int : 1;
> -		u32 cq_1_occ_int : 1;
> -		u32 cq_2_occ_int : 1;
> -		u32 cq_3_occ_int : 1;
> -		u32 cq_4_occ_int : 1;
> -		u32 cq_5_occ_int : 1;
> -		u32 cq_6_occ_int : 1;
> -		u32 cq_7_occ_int : 1;
> -		u32 cq_8_occ_int : 1;
> -		u32 cq_9_occ_int : 1;
> -		u32 cq_10_occ_int : 1;
> -		u32 cq_11_occ_int : 1;
> -		u32 cq_12_occ_int : 1;
> -		u32 cq_13_occ_int : 1;
> -		u32 cq_14_occ_int : 1;
> -		u32 cq_15_occ_int : 1;
> -		u32 cq_16_occ_int : 1;
> -		u32 cq_17_occ_int : 1;
> -		u32 cq_18_occ_int : 1;
> -		u32 cq_19_occ_int : 1;
> -		u32 cq_20_occ_int : 1;
> -		u32 cq_21_occ_int : 1;
> -		u32 cq_22_occ_int : 1;
> -		u32 cq_23_occ_int : 1;
> -		u32 cq_24_occ_int : 1;
> -		u32 cq_25_occ_int : 1;
> -		u32 cq_26_occ_int : 1;
> -		u32 cq_27_occ_int : 1;
> -		u32 cq_28_occ_int : 1;
> -		u32 cq_29_occ_int : 1;
> -		u32 cq_30_occ_int : 1;
> -		u32 cq_31_occ_int : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_CQ_63_32_OCC_INT_STS 0x444
> -#define DLB_SYS_DIR_CQ_63_32_OCC_INT_STS_RST 0x0
> -union dlb_sys_dir_cq_63_32_occ_int_sts {
> -	struct {
> -		u32 cq_32_occ_int : 1;
> -		u32 cq_33_occ_int : 1;
> -		u32 cq_34_occ_int : 1;
> -		u32 cq_35_occ_int : 1;
> -		u32 cq_36_occ_int : 1;
> -		u32 cq_37_occ_int : 1;
> -		u32 cq_38_occ_int : 1;
> -		u32 cq_39_occ_int : 1;
> -		u32 cq_40_occ_int : 1;
> -		u32 cq_41_occ_int : 1;
> -		u32 cq_42_occ_int : 1;
> -		u32 cq_43_occ_int : 1;
> -		u32 cq_44_occ_int : 1;
> -		u32 cq_45_occ_int : 1;
> -		u32 cq_46_occ_int : 1;
> -		u32 cq_47_occ_int : 1;
> -		u32 cq_48_occ_int : 1;
> -		u32 cq_49_occ_int : 1;
> -		u32 cq_50_occ_int : 1;
> -		u32 cq_51_occ_int : 1;
> -		u32 cq_52_occ_int : 1;
> -		u32 cq_53_occ_int : 1;
> -		u32 cq_54_occ_int : 1;
> -		u32 cq_55_occ_int : 1;
> -		u32 cq_56_occ_int : 1;
> -		u32 cq_57_occ_int : 1;
> -		u32 cq_58_occ_int : 1;
> -		u32 cq_59_occ_int : 1;
> -		u32 cq_60_occ_int : 1;
> -		u32 cq_61_occ_int : 1;
> -		u32 cq_62_occ_int : 1;
> -		u32 cq_63_occ_int : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_CQ_95_64_OCC_INT_STS 0x448
> -#define DLB_SYS_DIR_CQ_95_64_OCC_INT_STS_RST 0x0
> -union dlb_sys_dir_cq_95_64_occ_int_sts {
> -	struct {
> -		u32 cq_64_occ_int : 1;
> -		u32 cq_65_occ_int : 1;
> -		u32 cq_66_occ_int : 1;
> -		u32 cq_67_occ_int : 1;
> -		u32 cq_68_occ_int : 1;
> -		u32 cq_69_occ_int : 1;
> -		u32 cq_70_occ_int : 1;
> -		u32 cq_71_occ_int : 1;
> -		u32 cq_72_occ_int : 1;
> -		u32 cq_73_occ_int : 1;
> -		u32 cq_74_occ_int : 1;
> -		u32 cq_75_occ_int : 1;
> -		u32 cq_76_occ_int : 1;
> -		u32 cq_77_occ_int : 1;
> -		u32 cq_78_occ_int : 1;
> -		u32 cq_79_occ_int : 1;
> -		u32 cq_80_occ_int : 1;
> -		u32 cq_81_occ_int : 1;
> -		u32 cq_82_occ_int : 1;
> -		u32 cq_83_occ_int : 1;
> -		u32 cq_84_occ_int : 1;
> -		u32 cq_85_occ_int : 1;
> -		u32 cq_86_occ_int : 1;
> -		u32 cq_87_occ_int : 1;
> -		u32 cq_88_occ_int : 1;
> -		u32 cq_89_occ_int : 1;
> -		u32 cq_90_occ_int : 1;
> -		u32 cq_91_occ_int : 1;
> -		u32 cq_92_occ_int : 1;
> -		u32 cq_93_occ_int : 1;
> -		u32 cq_94_occ_int : 1;
> -		u32 cq_95_occ_int : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_DIR_CQ_127_96_OCC_INT_STS 0x44c
> -#define DLB_SYS_DIR_CQ_127_96_OCC_INT_STS_RST 0x0
> -union dlb_sys_dir_cq_127_96_occ_int_sts {
> -	struct {
> -		u32 cq_96_occ_int : 1;
> -		u32 cq_97_occ_int : 1;
> -		u32 cq_98_occ_int : 1;
> -		u32 cq_99_occ_int : 1;
> -		u32 cq_100_occ_int : 1;
> -		u32 cq_101_occ_int : 1;
> -		u32 cq_102_occ_int : 1;
> -		u32 cq_103_occ_int : 1;
> -		u32 cq_104_occ_int : 1;
> -		u32 cq_105_occ_int : 1;
> -		u32 cq_106_occ_int : 1;
> -		u32 cq_107_occ_int : 1;
> -		u32 cq_108_occ_int : 1;
> -		u32 cq_109_occ_int : 1;
> -		u32 cq_110_occ_int : 1;
> -		u32 cq_111_occ_int : 1;
> -		u32 cq_112_occ_int : 1;
> -		u32 cq_113_occ_int : 1;
> -		u32 cq_114_occ_int : 1;
> -		u32 cq_115_occ_int : 1;
> -		u32 cq_116_occ_int : 1;
> -		u32 cq_117_occ_int : 1;
> -		u32 cq_118_occ_int : 1;
> -		u32 cq_119_occ_int : 1;
> -		u32 cq_120_occ_int : 1;
> -		u32 cq_121_occ_int : 1;
> -		u32 cq_122_occ_int : 1;
> -		u32 cq_123_occ_int : 1;
> -		u32 cq_124_occ_int : 1;
> -		u32 cq_125_occ_int : 1;
> -		u32 cq_126_occ_int : 1;
> -		u32 cq_127_occ_int : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_CQ_31_0_OCC_INT_STS 0x460
> -#define DLB_SYS_LDB_CQ_31_0_OCC_INT_STS_RST 0x0
> -union dlb_sys_ldb_cq_31_0_occ_int_sts {
> -	struct {
> -		u32 cq_0_occ_int : 1;
> -		u32 cq_1_occ_int : 1;
> -		u32 cq_2_occ_int : 1;
> -		u32 cq_3_occ_int : 1;
> -		u32 cq_4_occ_int : 1;
> -		u32 cq_5_occ_int : 1;
> -		u32 cq_6_occ_int : 1;
> -		u32 cq_7_occ_int : 1;
> -		u32 cq_8_occ_int : 1;
> -		u32 cq_9_occ_int : 1;
> -		u32 cq_10_occ_int : 1;
> -		u32 cq_11_occ_int : 1;
> -		u32 cq_12_occ_int : 1;
> -		u32 cq_13_occ_int : 1;
> -		u32 cq_14_occ_int : 1;
> -		u32 cq_15_occ_int : 1;
> -		u32 cq_16_occ_int : 1;
> -		u32 cq_17_occ_int : 1;
> -		u32 cq_18_occ_int : 1;
> -		u32 cq_19_occ_int : 1;
> -		u32 cq_20_occ_int : 1;
> -		u32 cq_21_occ_int : 1;
> -		u32 cq_22_occ_int : 1;
> -		u32 cq_23_occ_int : 1;
> -		u32 cq_24_occ_int : 1;
> -		u32 cq_25_occ_int : 1;
> -		u32 cq_26_occ_int : 1;
> -		u32 cq_27_occ_int : 1;
> -		u32 cq_28_occ_int : 1;
> -		u32 cq_29_occ_int : 1;
> -		u32 cq_30_occ_int : 1;
> -		u32 cq_31_occ_int : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_LDB_CQ_63_32_OCC_INT_STS 0x464
> -#define DLB_SYS_LDB_CQ_63_32_OCC_INT_STS_RST 0x0
> -union dlb_sys_ldb_cq_63_32_occ_int_sts {
> -	struct {
> -		u32 cq_32_occ_int : 1;
> -		u32 cq_33_occ_int : 1;
> -		u32 cq_34_occ_int : 1;
> -		u32 cq_35_occ_int : 1;
> -		u32 cq_36_occ_int : 1;
> -		u32 cq_37_occ_int : 1;
> -		u32 cq_38_occ_int : 1;
> -		u32 cq_39_occ_int : 1;
> -		u32 cq_40_occ_int : 1;
> -		u32 cq_41_occ_int : 1;
> -		u32 cq_42_occ_int : 1;
> -		u32 cq_43_occ_int : 1;
> -		u32 cq_44_occ_int : 1;
> -		u32 cq_45_occ_int : 1;
> -		u32 cq_46_occ_int : 1;
> -		u32 cq_47_occ_int : 1;
> -		u32 cq_48_occ_int : 1;
> -		u32 cq_49_occ_int : 1;
> -		u32 cq_50_occ_int : 1;
> -		u32 cq_51_occ_int : 1;
> -		u32 cq_52_occ_int : 1;
> -		u32 cq_53_occ_int : 1;
> -		u32 cq_54_occ_int : 1;
> -		u32 cq_55_occ_int : 1;
> -		u32 cq_56_occ_int : 1;
> -		u32 cq_57_occ_int : 1;
> -		u32 cq_58_occ_int : 1;
> -		u32 cq_59_occ_int : 1;
> -		u32 cq_60_occ_int : 1;
> -		u32 cq_61_occ_int : 1;
> -		u32 cq_62_occ_int : 1;
> -		u32 cq_63_occ_int : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_ALARM_HW_SYND 0x50c
> -#define DLB_SYS_ALARM_HW_SYND_RST 0x0
> -union dlb_sys_alarm_hw_synd {
> -	struct {
> -		u32 syndrome : 8;
> -		u32 rtype : 2;
> -		u32 rsvd0 : 2;
> -		u32 from_dmv : 1;
> -		u32 is_ldb : 1;
> -		u32 cls : 2;
> -		u32 aid : 6;
> -		u32 unit : 4;
> -		u32 source : 4;
> -		u32 more : 1;
> -		u32 valid : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_SYS_SYS_ALARM_INT_ENABLE 0xc001048
> -#define DLB_SYS_SYS_ALARM_INT_ENABLE_RST 0x7fffff
> -union dlb_sys_sys_alarm_int_enable {
> -	struct {
> -		u32 cq_addr_overflow_error : 1;
> -		u32 ingress_perr : 1;
> -		u32 egress_perr : 1;
> -		u32 alarm_perr : 1;
> -		u32 vf_to_pf_isr_pend_error : 1;
> -		u32 pf_to_vf_isr_pend_error : 1;
> -		u32 timeout_error : 1;
> -		u32 dmvw_sm_error : 1;
> -		u32 pptr_sm_par_error : 1;
> -		u32 pptr_sm_len_error : 1;
> -		u32 sch_sm_error : 1;
> -		u32 wbuf_flag_error : 1;
> -		u32 dmvw_cl_error : 1;
> -		u32 dmvr_cl_error : 1;
> -		u32 cmpl_data_error : 1;
> -		u32 cmpl_error : 1;
> -		u32 fifo_underflow : 1;
> -		u32 fifo_overflow : 1;
> -		u32 sb_ep_parity_err : 1;
> -		u32 ti_parity_err : 1;
> -		u32 ri_parity_err : 1;
> -		u32 cfgm_ppw_err : 1;
> -		u32 system_csr_perr : 1;
> -		u32 rsvd0 : 9;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(x) \
> -	(0x20000000 + (x) * 0x1000)
> -#define DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST 0x0
> -union dlb_lsp_cq_ldb_tot_sch_cnt_ctrl {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_LDB_DSBL(x) \
> -	(0x20000124 + (x) * 0x1000)
> -#define DLB_LSP_CQ_LDB_DSBL_RST 0x1
> -union dlb_lsp_cq_ldb_dsbl {
> -	struct {
> -		u32 disabled : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_LDB_TOT_SCH_CNTH(x) \
> -	(0x20000120 + (x) * 0x1000)
> -#define DLB_LSP_CQ_LDB_TOT_SCH_CNTH_RST 0x0
> -union dlb_lsp_cq_ldb_tot_sch_cnth {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_LDB_TOT_SCH_CNTL(x) \
> -	(0x2000011c + (x) * 0x1000)
> -#define DLB_LSP_CQ_LDB_TOT_SCH_CNTL_RST 0x0
> -union dlb_lsp_cq_ldb_tot_sch_cntl {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(x) \
> -	(0x20000118 + (x) * 0x1000)
> -#define DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST 0x0
> -union dlb_lsp_cq_ldb_tkn_depth_sel {
> -	struct {
> -		u32 token_depth_select : 4;
> -		u32 ignore_depth : 1;
> -		u32 enab_shallow_cq : 1;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_LDB_TKN_CNT(x) \
> -	(0x20000114 + (x) * 0x1000)
> -#define DLB_LSP_CQ_LDB_TKN_CNT_RST 0x0
> -union dlb_lsp_cq_ldb_tkn_cnt {
> -	struct {
> -		u32 token_count : 11;
> -		u32 rsvd0 : 21;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_LDB_INFL_LIM(x) \
> -	(0x20000110 + (x) * 0x1000)
> -#define DLB_LSP_CQ_LDB_INFL_LIM_RST 0x0
> -union dlb_lsp_cq_ldb_infl_lim {
> -	struct {
> -		u32 limit : 13;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_LDB_INFL_CNT(x) \
> -	(0x2000010c + (x) * 0x1000)
> -#define DLB_LSP_CQ_LDB_INFL_CNT_RST 0x0
> -union dlb_lsp_cq_ldb_infl_cnt {
> -	struct {
> -		u32 count : 13;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ2QID(x, y) \
> -	(0x20000104 + (x) * 0x1000 + (y) * 0x4)
> -#define DLB_LSP_CQ2QID_RST 0x0
> -union dlb_lsp_cq2qid {
> -	struct {
> -		u32 qid_p0 : 7;
> -		u32 rsvd3 : 1;
> -		u32 qid_p1 : 7;
> -		u32 rsvd2 : 1;
> -		u32 qid_p2 : 7;
> -		u32 rsvd1 : 1;
> -		u32 qid_p3 : 7;
> -		u32 rsvd0 : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ2PRIOV(x) \
> -	(0x20000100 + (x) * 0x1000)
> -#define DLB_LSP_CQ2PRIOV_RST 0x0
> -union dlb_lsp_cq2priov {
> -	struct {
> -		u32 prio : 24;
> -		u32 v : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_DIR_DSBL(x) \
> -	(0x20000310 + (x) * 0x1000)
> -#define DLB_LSP_CQ_DIR_DSBL_RST 0x1
> -union dlb_lsp_cq_dir_dsbl {
> -	struct {
> -		u32 disabled : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(x) \
> -	(0x2000030c + (x) * 0x1000)
> -#define DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST 0x0
> -union dlb_lsp_cq_dir_tkn_depth_sel_dsi {
> -	struct {
> -		u32 token_depth_select : 4;
> -		u32 disable_wb_opt : 1;
> -		u32 ignore_depth : 1;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_DIR_TOT_SCH_CNTH(x) \
> -	(0x20000308 + (x) * 0x1000)
> -#define DLB_LSP_CQ_DIR_TOT_SCH_CNTH_RST 0x0
> -union dlb_lsp_cq_dir_tot_sch_cnth {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_DIR_TOT_SCH_CNTL(x) \
> -	(0x20000304 + (x) * 0x1000)
> -#define DLB_LSP_CQ_DIR_TOT_SCH_CNTL_RST 0x0
> -union dlb_lsp_cq_dir_tot_sch_cntl {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CQ_DIR_TKN_CNT(x) \
> -	(0x20000300 + (x) * 0x1000)
> -#define DLB_LSP_CQ_DIR_TKN_CNT_RST 0x0
> -union dlb_lsp_cq_dir_tkn_cnt {
> -	struct {
> -		u32 count : 11;
> -		u32 rsvd0 : 21;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_LDB_QID2CQIDX(x, y) \
> -	(0x20000400 + (x) * 0x1000 + (y) * 0x4)
> -#define DLB_LSP_QID_LDB_QID2CQIDX_RST 0x0
> -union dlb_lsp_qid_ldb_qid2cqidx {
> -	struct {
> -		u32 cq_p0 : 8;
> -		u32 cq_p1 : 8;
> -		u32 cq_p2 : 8;
> -		u32 cq_p3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_LDB_QID2CQIDX2(x, y) \
> -	(0x20000500 + (x) * 0x1000 + (y) * 0x4)
> -#define DLB_LSP_QID_LDB_QID2CQIDX2_RST 0x0
> -union dlb_lsp_qid_ldb_qid2cqidx2 {
> -	struct {
> -		u32 cq_p0 : 8;
> -		u32 cq_p1 : 8;
> -		u32 cq_p2 : 8;
> -		u32 cq_p3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_ATQ_ENQUEUE_CNT(x) \
> -	(0x2000066c + (x) * 0x1000)
> -#define DLB_LSP_QID_ATQ_ENQUEUE_CNT_RST 0x0
> -union dlb_lsp_qid_atq_enqueue_cnt {
> -	struct {
> -		u32 count : 15;
> -		u32 rsvd0 : 17;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_LDB_INFL_LIM(x) \
> -	(0x2000064c + (x) * 0x1000)
> -#define DLB_LSP_QID_LDB_INFL_LIM_RST 0x0
> -union dlb_lsp_qid_ldb_infl_lim {
> -	struct {
> -		u32 limit : 13;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_LDB_INFL_CNT(x) \
> -	(0x2000062c + (x) * 0x1000)
> -#define DLB_LSP_QID_LDB_INFL_CNT_RST 0x0
> -union dlb_lsp_qid_ldb_infl_cnt {
> -	struct {
> -		u32 count : 13;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_AQED_ACTIVE_LIM(x) \
> -	(0x20000628 + (x) * 0x1000)
> -#define DLB_LSP_QID_AQED_ACTIVE_LIM_RST 0x0
> -union dlb_lsp_qid_aqed_active_lim {
> -	struct {
> -		u32 limit : 12;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_AQED_ACTIVE_CNT(x) \
> -	(0x20000624 + (x) * 0x1000)
> -#define DLB_LSP_QID_AQED_ACTIVE_CNT_RST 0x0
> -union dlb_lsp_qid_aqed_active_cnt {
> -	struct {
> -		u32 count : 12;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_LDB_ENQUEUE_CNT(x) \
> -	(0x20000604 + (x) * 0x1000)
> -#define DLB_LSP_QID_LDB_ENQUEUE_CNT_RST 0x0
> -union dlb_lsp_qid_ldb_enqueue_cnt {
> -	struct {
> -		u32 count : 15;
> -		u32 rsvd0 : 17;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_LDB_REPLAY_CNT(x) \
> -	(0x20000600 + (x) * 0x1000)
> -#define DLB_LSP_QID_LDB_REPLAY_CNT_RST 0x0
> -union dlb_lsp_qid_ldb_replay_cnt {
> -	struct {
> -		u32 count : 15;
> -		u32 rsvd0 : 17;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_QID_DIR_ENQUEUE_CNT(x) \
> -	(0x20000700 + (x) * 0x1000)
> -#define DLB_LSP_QID_DIR_ENQUEUE_CNT_RST 0x0
> -union dlb_lsp_qid_dir_enqueue_cnt {
> -	struct {
> -		u32 count : 13;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CTRL_CONFIG_0 0x2800002c
> -#define DLB_LSP_CTRL_CONFIG_0_RST 0x12cc
> -union dlb_lsp_ctrl_config_0 {
> -	struct {
> -		u32 atm_cq_qid_priority_prot : 1;
> -		u32 ldb_arb_ignore_empty : 1;
> -		u32 ldb_arb_mode : 2;
> -		u32 ldb_arb_threshold : 18;
> -		u32 cfg_cq_sla_upd_always : 1;
> -		u32 cfg_cq_wcn_upd_always : 1;
> -		u32 spare : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_1 0x28000028
> -#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_1_RST 0x0
> -union dlb_lsp_cfg_arb_weight_atm_nalb_qid_1 {
> -	struct {
> -		u32 slot4_weight : 8;
> -		u32 slot5_weight : 8;
> -		u32 slot6_weight : 8;
> -		u32 slot7_weight : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0 0x28000024
> -#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_RST 0x0
> -union dlb_lsp_cfg_arb_weight_atm_nalb_qid_0 {
> -	struct {
> -		u32 slot0_weight : 8;
> -		u32 slot1_weight : 8;
> -		u32 slot2_weight : 8;
> -		u32 slot3_weight : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_1 0x28000020
> -#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_1_RST 0x0
> -union dlb_lsp_cfg_arb_weight_ldb_qid_1 {
> -	struct {
> -		u32 slot4_weight : 8;
> -		u32 slot5_weight : 8;
> -		u32 slot6_weight : 8;
> -		u32 slot7_weight : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_0 0x2800001c
> -#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_0_RST 0x0
> -union dlb_lsp_cfg_arb_weight_ldb_qid_0 {
> -	struct {
> -		u32 slot0_weight : 8;
> -		u32 slot1_weight : 8;
> -		u32 slot2_weight : 8;
> -		u32 slot3_weight : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_LDB_SCHED_CTRL 0x28100000
> -#define DLB_LSP_LDB_SCHED_CTRL_RST 0x0
> -union dlb_lsp_ldb_sched_ctrl {
> -	struct {
> -		u32 cq : 8;
> -		u32 qidix : 3;
> -		u32 value : 1;
> -		u32 nalb_haswork_v : 1;
> -		u32 rlist_haswork_v : 1;
> -		u32 slist_haswork_v : 1;
> -		u32 inflight_ok_v : 1;
> -		u32 aqed_nfull_v : 1;
> -		u32 spare0 : 15;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_DIR_SCH_CNT_H 0x2820000c
> -#define DLB_LSP_DIR_SCH_CNT_H_RST 0x0
> -union dlb_lsp_dir_sch_cnt_h {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_DIR_SCH_CNT_L 0x28200008
> -#define DLB_LSP_DIR_SCH_CNT_L_RST 0x0
> -union dlb_lsp_dir_sch_cnt_l {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_LDB_SCH_CNT_H 0x28200004
> -#define DLB_LSP_LDB_SCH_CNT_H_RST 0x0
> -union dlb_lsp_ldb_sch_cnt_h {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_LSP_LDB_SCH_CNT_L 0x28200000
> -#define DLB_LSP_LDB_SCH_CNT_L_RST 0x0
> -union dlb_lsp_ldb_sch_cnt_l {
> -	struct {
> -		u32 count : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_DP_DIR_CSR_CTRL 0x38000018
> -#define DLB_DP_DIR_CSR_CTRL_RST 0xc0000000
> -union dlb_dp_dir_csr_ctrl {
> -	struct {
> -		u32 cfg_int_dis : 1;
> -		u32 cfg_int_dis_sbe : 1;
> -		u32 cfg_int_dis_mbe : 1;
> -		u32 spare0 : 27;
> -		u32 cfg_vasr_dis : 1;
> -		u32 cfg_int_dis_synd : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_1 0x38000014
> -#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_1_RST 0xfffefdfc
> -union dlb_dp_cfg_ctrl_arb_weights_tqpri_dir_1 {
> -	struct {
> -		u32 pri4 : 8;
> -		u32 pri5 : 8;
> -		u32 pri6 : 8;
> -		u32 pri7 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_0 0x38000010
> -#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_0_RST 0xfbfaf9f8
> -union dlb_dp_cfg_ctrl_arb_weights_tqpri_dir_0 {
> -	struct {
> -		u32 pri0 : 8;
> -		u32 pri1 : 8;
> -		u32 pri2 : 8;
> -		u32 pri3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1 0x3800000c
> -#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1_RST 0xfffefdfc
> -union dlb_dp_cfg_ctrl_arb_weights_tqpri_replay_1 {
> -	struct {
> -		u32 pri4 : 8;
> -		u32 pri5 : 8;
> -		u32 pri6 : 8;
> -		u32 pri7 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0 0x38000008
> -#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0_RST 0xfbfaf9f8
> -union dlb_dp_cfg_ctrl_arb_weights_tqpri_replay_0 {
> -	struct {
> -		u32 pri0 : 8;
> -		u32 pri1 : 8;
> -		u32 pri2 : 8;
> -		u32 pri3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_1 0x6800001c
> -#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_1_RST 0xfffefdfc
> -union dlb_nalb_pipe_ctrl_arb_weights_tqpri_nalb_1 {
> -	struct {
> -		u32 pri4 : 8;
> -		u32 pri5 : 8;
> -		u32 pri6 : 8;
> -		u32 pri7 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_0 0x68000018
> -#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_0_RST 0xfbfaf9f8
> -union dlb_nalb_pipe_ctrl_arb_weights_tqpri_nalb_0 {
> -	struct {
> -		u32 pri0 : 8;
> -		u32 pri1 : 8;
> -		u32 pri2 : 8;
> -		u32 pri3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_1 0x68000014
> -#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_1_RST 0xfffefdfc
> -union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_atq_1 {
> -	struct {
> -		u32 pri4 : 8;
> -		u32 pri5 : 8;
> -		u32 pri6 : 8;
> -		u32 pri7 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_0 0x68000010
> -#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_0_RST 0xfbfaf9f8
> -union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_atq_0 {
> -	struct {
> -		u32 pri0 : 8;
> -		u32 pri1 : 8;
> -		u32 pri2 : 8;
> -		u32 pri3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1 0x6800000c
> -#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1_RST 0xfffefdfc
> -union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_replay_1 {
> -	struct {
> -		u32 pri4 : 8;
> -		u32 pri5 : 8;
> -		u32 pri6 : 8;
> -		u32 pri7 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0 0x68000008
> -#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0_RST 0xfbfaf9f8
> -union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_replay_0 {
> -	struct {
> -		u32 pri0 : 8;
> -		u32 pri1 : 8;
> -		u32 pri2 : 8;
> -		u32 pri3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_ATM_PIPE_QID_LDB_QID2CQIDX(x, y) \
> -	(0x70000000 + (x) * 0x1000 + (y) * 0x4)
> -#define DLB_ATM_PIPE_QID_LDB_QID2CQIDX_RST 0x0
> -union dlb_atm_pipe_qid_ldb_qid2cqidx {
> -	struct {
> -		u32 cq_p0 : 8;
> -		u32 cq_p1 : 8;
> -		u32 cq_p2 : 8;
> -		u32 cq_p3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_ATM_PIPE_CFG_CTRL_ARB_WEIGHTS_SCHED_BIN 0x7800000c
> -#define DLB_ATM_PIPE_CFG_CTRL_ARB_WEIGHTS_SCHED_BIN_RST 0xfffefdfc
> -union dlb_atm_pipe_cfg_ctrl_arb_weights_sched_bin {
> -	struct {
> -		u32 bin0 : 8;
> -		u32 bin1 : 8;
> -		u32 bin2 : 8;
> -		u32 bin3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_ATM_PIPE_CTRL_ARB_WEIGHTS_RDY_BIN 0x78000008
> -#define DLB_ATM_PIPE_CTRL_ARB_WEIGHTS_RDY_BIN_RST 0xfffefdfc
> -union dlb_atm_pipe_ctrl_arb_weights_rdy_bin {
> -	struct {
> -		u32 bin0 : 8;
> -		u32 bin1 : 8;
> -		u32 bin2 : 8;
> -		u32 bin3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_AQED_PIPE_QID_FID_LIM(x) \
> -	(0x80000014 + (x) * 0x1000)
> -#define DLB_AQED_PIPE_QID_FID_LIM_RST 0x7ff
> -union dlb_aqed_pipe_qid_fid_lim {
> -	struct {
> -		u32 qid_fid_limit : 13;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_AQED_PIPE_FL_POP_PTR(x) \
> -	(0x80000010 + (x) * 0x1000)
> -#define DLB_AQED_PIPE_FL_POP_PTR_RST 0x0
> -union dlb_aqed_pipe_fl_pop_ptr {
> -	struct {
> -		u32 pop_ptr : 11;
> -		u32 generation : 1;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_AQED_PIPE_FL_PUSH_PTR(x) \
> -	(0x8000000c + (x) * 0x1000)
> -#define DLB_AQED_PIPE_FL_PUSH_PTR_RST 0x0
> -union dlb_aqed_pipe_fl_push_ptr {
> -	struct {
> -		u32 push_ptr : 11;
> -		u32 generation : 1;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_AQED_PIPE_FL_BASE(x) \
> -	(0x80000008 + (x) * 0x1000)
> -#define DLB_AQED_PIPE_FL_BASE_RST 0x0
> -union dlb_aqed_pipe_fl_base {
> -	struct {
> -		u32 base : 11;
> -		u32 rsvd0 : 21;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_AQED_PIPE_FL_LIM(x) \
> -	(0x80000004 + (x) * 0x1000)
> -#define DLB_AQED_PIPE_FL_LIM_RST 0x800
> -union dlb_aqed_pipe_fl_lim {
> -	struct {
> -		u32 limit : 11;
> -		u32 freelist_disable : 1;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_AQED_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATM_0 0x88000008
> -#define DLB_AQED_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATM_0_RST 0xfffe
> -union dlb_aqed_pipe_cfg_ctrl_arb_weights_tqpri_atm_0 {
> -	struct {
> -		u32 pri0 : 8;
> -		u32 pri1 : 8;
> -		u32 pri2 : 8;
> -		u32 pri3 : 8;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_RO_PIPE_QID2GRPSLT(x) \
> -	(0x90000000 + (x) * 0x1000)
> -#define DLB_RO_PIPE_QID2GRPSLT_RST 0x0
> -union dlb_ro_pipe_qid2grpslt {
> -	struct {
> -		u32 slot : 5;
> -		u32 rsvd1 : 3;
> -		u32 group : 2;
> -		u32 rsvd0 : 22;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_RO_PIPE_GRP_SN_MODE 0x98000008
> -#define DLB_RO_PIPE_GRP_SN_MODE_RST 0x0
> -union dlb_ro_pipe_grp_sn_mode {
> -	struct {
> -		u32 sn_mode_0 : 3;
> -		u32 reserved0 : 5;
> -		u32 sn_mode_1 : 3;
> -		u32 reserved1 : 5;
> -		u32 sn_mode_2 : 3;
> -		u32 reserved2 : 5;
> -		u32 sn_mode_3 : 3;
> -		u32 reserved3 : 5;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_CFG_DIR_PP_SW_ALARM_EN(x) \
> -	(0xa000003c + (x) * 0x1000)
> -#define DLB_CHP_CFG_DIR_PP_SW_ALARM_EN_RST 0x1
> -union dlb_chp_cfg_dir_pp_sw_alarm_en {
> -	struct {
> -		u32 alarm_enable : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_WD_ENB(x) \
> -	(0xa0000038 + (x) * 0x1000)
> -#define DLB_CHP_DIR_CQ_WD_ENB_RST 0x0
> -union dlb_chp_dir_cq_wd_enb {
> -	struct {
> -		u32 wd_enable : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_LDB_PP2POOL(x) \
> -	(0xa0000034 + (x) * 0x1000)
> -#define DLB_CHP_DIR_LDB_PP2POOL_RST 0x0
> -union dlb_chp_dir_ldb_pp2pool {
> -	struct {
> -		u32 pool : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_DIR_PP2POOL(x) \
> -	(0xa0000030 + (x) * 0x1000)
> -#define DLB_CHP_DIR_DIR_PP2POOL_RST 0x0
> -union dlb_chp_dir_dir_pp2pool {
> -	struct {
> -		u32 pool : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_LDB_CRD_CNT(x) \
> -	(0xa000002c + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_LDB_CRD_CNT_RST 0x0
> -union dlb_chp_dir_pp_ldb_crd_cnt {
> -	struct {
> -		u32 count : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_DIR_CRD_CNT(x) \
> -	(0xa0000028 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_DIR_CRD_CNT_RST 0x0
> -union dlb_chp_dir_pp_dir_crd_cnt {
> -	struct {
> -		u32 count : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_TMR_THRESHOLD(x) \
> -	(0xa0000024 + (x) * 0x1000)
> -#define DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST 0x0
> -union dlb_chp_dir_cq_tmr_threshold {
> -	struct {
> -		u32 timer_thrsh : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_INT_ENB(x) \
> -	(0xa0000020 + (x) * 0x1000)
> -#define DLB_CHP_DIR_CQ_INT_ENB_RST 0x0
> -union dlb_chp_dir_cq_int_enb {
> -	struct {
> -		u32 en_tim : 1;
> -		u32 en_depth : 1;
> -		u32 rsvd0 : 30;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(x) \
> -	(0xa000001c + (x) * 0x1000)
> -#define DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST 0x0
> -union dlb_chp_dir_cq_int_depth_thrsh {
> -	struct {
> -		u32 depth_threshold : 12;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(x) \
> -	(0xa0000018 + (x) * 0x1000)
> -#define DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST 0x0
> -union dlb_chp_dir_cq_tkn_depth_sel {
> -	struct {
> -		u32 token_depth_select : 4;
> -		u32 rsvd0 : 28;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(x) \
> -	(0xa0000014 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST 0x1
> -union dlb_chp_dir_pp_ldb_min_crd_qnt {
> -	struct {
> -		u32 quanta : 10;
> -		u32 rsvd0 : 22;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(x) \
> -	(0xa0000010 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST 0x1
> -union dlb_chp_dir_pp_dir_min_crd_qnt {
> -	struct {
> -		u32 quanta : 10;
> -		u32 rsvd0 : 22;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_LDB_CRD_LWM(x) \
> -	(0xa000000c + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_LDB_CRD_LWM_RST 0x0
> -union dlb_chp_dir_pp_ldb_crd_lwm {
> -	struct {
> -		u32 lwm : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_LDB_CRD_HWM(x) \
> -	(0xa0000008 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_LDB_CRD_HWM_RST 0x0
> -union dlb_chp_dir_pp_ldb_crd_hwm {
> -	struct {
> -		u32 hwm : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_DIR_CRD_LWM(x) \
> -	(0xa0000004 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_DIR_CRD_LWM_RST 0x0
> -union dlb_chp_dir_pp_dir_crd_lwm {
> -	struct {
> -		u32 lwm : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_DIR_CRD_HWM(x) \
> -	(0xa0000000 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_DIR_CRD_HWM_RST 0x0
> -union dlb_chp_dir_pp_dir_crd_hwm {
> -	struct {
> -		u32 hwm : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_CFG_LDB_PP_SW_ALARM_EN(x) \
> -	(0xa0000148 + (x) * 0x1000)
> -#define DLB_CHP_CFG_LDB_PP_SW_ALARM_EN_RST 0x1
> -union dlb_chp_cfg_ldb_pp_sw_alarm_en {
> -	struct {
> -		u32 alarm_enable : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_WD_ENB(x) \
> -	(0xa0000144 + (x) * 0x1000)
> -#define DLB_CHP_LDB_CQ_WD_ENB_RST 0x0
> -union dlb_chp_ldb_cq_wd_enb {
> -	struct {
> -		u32 wd_enable : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_SN_CHK_ENBL(x) \
> -	(0xa0000140 + (x) * 0x1000)
> -#define DLB_CHP_SN_CHK_ENBL_RST 0x0
> -union dlb_chp_sn_chk_enbl {
> -	struct {
> -		u32 en : 1;
> -		u32 rsvd0 : 31;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_HIST_LIST_BASE(x) \
> -	(0xa000013c + (x) * 0x1000)
> -#define DLB_CHP_HIST_LIST_BASE_RST 0x0
> -union dlb_chp_hist_list_base {
> -	struct {
> -		u32 base : 13;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_HIST_LIST_LIM(x) \
> -	(0xa0000138 + (x) * 0x1000)
> -#define DLB_CHP_HIST_LIST_LIM_RST 0x0
> -union dlb_chp_hist_list_lim {
> -	struct {
> -		u32 limit : 13;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_LDB_PP2POOL(x) \
> -	(0xa0000134 + (x) * 0x1000)
> -#define DLB_CHP_LDB_LDB_PP2POOL_RST 0x0
> -union dlb_chp_ldb_ldb_pp2pool {
> -	struct {
> -		u32 pool : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_DIR_PP2POOL(x) \
> -	(0xa0000130 + (x) * 0x1000)
> -#define DLB_CHP_LDB_DIR_PP2POOL_RST 0x0
> -union dlb_chp_ldb_dir_pp2pool {
> -	struct {
> -		u32 pool : 6;
> -		u32 rsvd0 : 26;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_LDB_CRD_CNT(x) \
> -	(0xa000012c + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_LDB_CRD_CNT_RST 0x0
> -union dlb_chp_ldb_pp_ldb_crd_cnt {
> -	struct {
> -		u32 count : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_DIR_CRD_CNT(x) \
> -	(0xa0000128 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_DIR_CRD_CNT_RST 0x0
> -union dlb_chp_ldb_pp_dir_crd_cnt {
> -	struct {
> -		u32 count : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_TMR_THRESHOLD(x) \
> -	(0xa0000124 + (x) * 0x1000)
> -#define DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST 0x0
> -union dlb_chp_ldb_cq_tmr_threshold {
> -	struct {
> -		u32 thrsh : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_INT_ENB(x) \
> -	(0xa0000120 + (x) * 0x1000)
> -#define DLB_CHP_LDB_CQ_INT_ENB_RST 0x0
> -union dlb_chp_ldb_cq_int_enb {
> -	struct {
> -		u32 en_tim : 1;
> -		u32 en_depth : 1;
> -		u32 rsvd0 : 30;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(x) \
> -	(0xa000011c + (x) * 0x1000)
> -#define DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST 0x0
> -union dlb_chp_ldb_cq_int_depth_thrsh {
> -	struct {
> -		u32 depth_threshold : 12;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(x) \
> -	(0xa0000118 + (x) * 0x1000)
> -#define DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST 0x0
> -union dlb_chp_ldb_cq_tkn_depth_sel {
> -	struct {
> -		u32 token_depth_select : 4;
> -		u32 rsvd0 : 28;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(x) \
> -	(0xa0000114 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST 0x1
> -union dlb_chp_ldb_pp_ldb_min_crd_qnt {
> -	struct {
> -		u32 quanta : 10;
> -		u32 rsvd0 : 22;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(x) \
> -	(0xa0000110 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST 0x1
> -union dlb_chp_ldb_pp_dir_min_crd_qnt {
> -	struct {
> -		u32 quanta : 10;
> -		u32 rsvd0 : 22;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_LDB_CRD_LWM(x) \
> -	(0xa000010c + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_LDB_CRD_LWM_RST 0x0
> -union dlb_chp_ldb_pp_ldb_crd_lwm {
> -	struct {
> -		u32 lwm : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_LDB_CRD_HWM(x) \
> -	(0xa0000108 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_LDB_CRD_HWM_RST 0x0
> -union dlb_chp_ldb_pp_ldb_crd_hwm {
> -	struct {
> -		u32 hwm : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_DIR_CRD_LWM(x) \
> -	(0xa0000104 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_DIR_CRD_LWM_RST 0x0
> -union dlb_chp_ldb_pp_dir_crd_lwm {
> -	struct {
> -		u32 lwm : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_DIR_CRD_HWM(x) \
> -	(0xa0000100 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_DIR_CRD_HWM_RST 0x0
> -union dlb_chp_ldb_pp_dir_crd_hwm {
> -	struct {
> -		u32 hwm : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_DEPTH(x) \
> -	(0xa0000218 + (x) * 0x1000)
> -#define DLB_CHP_DIR_CQ_DEPTH_RST 0x0
> -union dlb_chp_dir_cq_depth {
> -	struct {
> -		u32 cq_depth : 11;
> -		u32 rsvd0 : 21;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_WPTR(x) \
> -	(0xa0000214 + (x) * 0x1000)
> -#define DLB_CHP_DIR_CQ_WPTR_RST 0x0
> -union dlb_chp_dir_cq_wptr {
> -	struct {
> -		u32 write_pointer : 10;
> -		u32 rsvd0 : 22;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_LDB_PUSH_PTR(x) \
> -	(0xa0000210 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST 0x0
> -union dlb_chp_dir_pp_ldb_push_ptr {
> -	struct {
> -		u32 push_pointer : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_DIR_PUSH_PTR(x) \
> -	(0xa000020c + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST 0x0
> -union dlb_chp_dir_pp_dir_push_ptr {
> -	struct {
> -		u32 push_pointer : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_STATE_RESET(x) \
> -	(0xa0000204 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_STATE_RESET_RST 0x0
> -union dlb_chp_dir_pp_state_reset {
> -	struct {
> -		u32 rsvd1 : 7;
> -		u32 dir_type : 1;
> -		u32 rsvd0 : 23;
> -		u32 reset_pp_state : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_PP_CRD_REQ_STATE(x) \
> -	(0xa0000200 + (x) * 0x1000)
> -#define DLB_CHP_DIR_PP_CRD_REQ_STATE_RST 0x0
> -union dlb_chp_dir_pp_crd_req_state {
> -	struct {
> -		u32 dir_crd_req_active_valid : 1;
> -		u32 dir_crd_req_active_check : 1;
> -		u32 dir_crd_req_active_busy : 1;
> -		u32 rsvd1 : 1;
> -		u32 ldb_crd_req_active_valid : 1;
> -		u32 ldb_crd_req_active_check : 1;
> -		u32 ldb_crd_req_active_busy : 1;
> -		u32 rsvd0 : 1;
> -		u32 no_pp_credit_update : 1;
> -		u32 crd_req_state : 23;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_DEPTH(x) \
> -	(0xa0000320 + (x) * 0x1000)
> -#define DLB_CHP_LDB_CQ_DEPTH_RST 0x0
> -union dlb_chp_ldb_cq_depth {
> -	struct {
> -		u32 depth : 11;
> -		u32 reserved : 2;
> -		u32 rsvd0 : 19;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_WPTR(x) \
> -	(0xa000031c + (x) * 0x1000)
> -#define DLB_CHP_LDB_CQ_WPTR_RST 0x0
> -union dlb_chp_ldb_cq_wptr {
> -	struct {
> -		u32 write_pointer : 10;
> -		u32 rsvd0 : 22;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_LDB_PUSH_PTR(x) \
> -	(0xa0000318 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST 0x0
> -union dlb_chp_ldb_pp_ldb_push_ptr {
> -	struct {
> -		u32 push_pointer : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_DIR_PUSH_PTR(x) \
> -	(0xa0000314 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST 0x0
> -union dlb_chp_ldb_pp_dir_push_ptr {
> -	struct {
> -		u32 push_pointer : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_HIST_LIST_POP_PTR(x) \
> -	(0xa000030c + (x) * 0x1000)
> -#define DLB_CHP_HIST_LIST_POP_PTR_RST 0x0
> -union dlb_chp_hist_list_pop_ptr {
> -	struct {
> -		u32 pop_ptr : 13;
> -		u32 generation : 1;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_HIST_LIST_PUSH_PTR(x) \
> -	(0xa0000308 + (x) * 0x1000)
> -#define DLB_CHP_HIST_LIST_PUSH_PTR_RST 0x0
> -union dlb_chp_hist_list_push_ptr {
> -	struct {
> -		u32 push_ptr : 13;
> -		u32 generation : 1;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_STATE_RESET(x) \
> -	(0xa0000304 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_STATE_RESET_RST 0x0
> -union dlb_chp_ldb_pp_state_reset {
> -	struct {
> -		u32 rsvd1 : 7;
> -		u32 dir_type : 1;
> -		u32 rsvd0 : 23;
> -		u32 reset_pp_state : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_PP_CRD_REQ_STATE(x) \
> -	(0xa0000300 + (x) * 0x1000)
> -#define DLB_CHP_LDB_PP_CRD_REQ_STATE_RST 0x0
> -union dlb_chp_ldb_pp_crd_req_state {
> -	struct {
> -		u32 dir_crd_req_active_valid : 1;
> -		u32 dir_crd_req_active_check : 1;
> -		u32 dir_crd_req_active_busy : 1;
> -		u32 rsvd1 : 1;
> -		u32 ldb_crd_req_active_valid : 1;
> -		u32 ldb_crd_req_active_check : 1;
> -		u32 ldb_crd_req_active_busy : 1;
> -		u32 rsvd0 : 1;
> -		u32 no_pp_credit_update : 1;
> -		u32 crd_req_state : 23;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_ORD_QID_SN(x) \
> -	(0xa0000408 + (x) * 0x1000)
> -#define DLB_CHP_ORD_QID_SN_RST 0x0
> -union dlb_chp_ord_qid_sn {
> -	struct {
> -		u32 sn : 12;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_ORD_QID_SN_MAP(x) \
> -	(0xa0000404 + (x) * 0x1000)
> -#define DLB_CHP_ORD_QID_SN_MAP_RST 0x0
> -union dlb_chp_ord_qid_sn_map {
> -	struct {
> -		u32 mode : 3;
> -		u32 slot : 5;
> -		u32 grp : 2;
> -		u32 rsvd0 : 22;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_POOL_CRD_CNT(x) \
> -	(0xa000050c + (x) * 0x1000)
> -#define DLB_CHP_LDB_POOL_CRD_CNT_RST 0x0
> -union dlb_chp_ldb_pool_crd_cnt {
> -	struct {
> -		u32 count : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_QED_FL_BASE(x) \
> -	(0xa0000508 + (x) * 0x1000)
> -#define DLB_CHP_QED_FL_BASE_RST 0x0
> -union dlb_chp_qed_fl_base {
> -	struct {
> -		u32 base : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_QED_FL_LIM(x) \
> -	(0xa0000504 + (x) * 0x1000)
> -#define DLB_CHP_QED_FL_LIM_RST 0x8000
> -union dlb_chp_qed_fl_lim {
> -	struct {
> -		u32 limit : 14;
> -		u32 rsvd1 : 1;
> -		u32 freelist_disable : 1;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_POOL_CRD_LIM(x) \
> -	(0xa0000500 + (x) * 0x1000)
> -#define DLB_CHP_LDB_POOL_CRD_LIM_RST 0x0
> -union dlb_chp_ldb_pool_crd_lim {
> -	struct {
> -		u32 limit : 16;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_QED_FL_POP_PTR(x) \
> -	(0xa0000604 + (x) * 0x1000)
> -#define DLB_CHP_QED_FL_POP_PTR_RST 0x0
> -union dlb_chp_qed_fl_pop_ptr {
> -	struct {
> -		u32 pop_ptr : 14;
> -		u32 reserved0 : 1;
> -		u32 generation : 1;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_QED_FL_PUSH_PTR(x) \
> -	(0xa0000600 + (x) * 0x1000)
> -#define DLB_CHP_QED_FL_PUSH_PTR_RST 0x0
> -union dlb_chp_qed_fl_push_ptr {
> -	struct {
> -		u32 push_ptr : 14;
> -		u32 reserved0 : 1;
> -		u32 generation : 1;
> -		u32 rsvd0 : 16;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_POOL_CRD_CNT(x) \
> -	(0xa000070c + (x) * 0x1000)
> -#define DLB_CHP_DIR_POOL_CRD_CNT_RST 0x0
> -union dlb_chp_dir_pool_crd_cnt {
> -	struct {
> -		u32 count : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DQED_FL_BASE(x) \
> -	(0xa0000708 + (x) * 0x1000)
> -#define DLB_CHP_DQED_FL_BASE_RST 0x0
> -union dlb_chp_dqed_fl_base {
> -	struct {
> -		u32 base : 12;
> -		u32 rsvd0 : 20;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DQED_FL_LIM(x) \
> -	(0xa0000704 + (x) * 0x1000)
> -#define DLB_CHP_DQED_FL_LIM_RST 0x2000
> -union dlb_chp_dqed_fl_lim {
> -	struct {
> -		u32 limit : 12;
> -		u32 rsvd1 : 1;
> -		u32 freelist_disable : 1;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_POOL_CRD_LIM(x) \
> -	(0xa0000700 + (x) * 0x1000)
> -#define DLB_CHP_DIR_POOL_CRD_LIM_RST 0x0
> -union dlb_chp_dir_pool_crd_lim {
> -	struct {
> -		u32 limit : 14;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DQED_FL_POP_PTR(x) \
> -	(0xa0000804 + (x) * 0x1000)
> -#define DLB_CHP_DQED_FL_POP_PTR_RST 0x0
> -union dlb_chp_dqed_fl_pop_ptr {
> -	struct {
> -		u32 pop_ptr : 12;
> -		u32 reserved0 : 1;
> -		u32 generation : 1;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DQED_FL_PUSH_PTR(x) \
> -	(0xa0000800 + (x) * 0x1000)
> -#define DLB_CHP_DQED_FL_PUSH_PTR_RST 0x0
> -union dlb_chp_dqed_fl_push_ptr {
> -	struct {
> -		u32 push_ptr : 12;
> -		u32 reserved0 : 1;
> -		u32 generation : 1;
> -		u32 rsvd0 : 18;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_CTRL_DIAG_02 0xa8000154
> -#define DLB_CHP_CTRL_DIAG_02_RST 0x0
> -union dlb_chp_ctrl_diag_02 {
> -	struct {
> -		u32 control : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_CFG_CHP_CSR_CTRL 0xa8000130
> -#define DLB_CHP_CFG_CHP_CSR_CTRL_RST 0xc0003fff
> -#define DLB_CHP_CFG_EXCESS_TOKENS_SHIFT 12
> -union dlb_chp_cfg_chp_csr_ctrl {
> -	struct {
> -		u32 int_inf_alarm_enable_0 : 1;
> -		u32 int_inf_alarm_enable_1 : 1;
> -		u32 int_inf_alarm_enable_2 : 1;
> -		u32 int_inf_alarm_enable_3 : 1;
> -		u32 int_inf_alarm_enable_4 : 1;
> -		u32 int_inf_alarm_enable_5 : 1;
> -		u32 int_inf_alarm_enable_6 : 1;
> -		u32 int_inf_alarm_enable_7 : 1;
> -		u32 int_inf_alarm_enable_8 : 1;
> -		u32 int_inf_alarm_enable_9 : 1;
> -		u32 int_inf_alarm_enable_10 : 1;
> -		u32 int_inf_alarm_enable_11 : 1;
> -		u32 int_inf_alarm_enable_12 : 1;
> -		u32 int_cor_alarm_enable : 1;
> -		u32 csr_control_spare : 14;
> -		u32 cfg_vasr_dis : 1;
> -		u32 counter_clear : 1;
> -		u32 blk_cor_report : 1;
> -		u32 blk_cor_synd : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_INTR_ARMED1 0xa8000068
> -#define DLB_CHP_LDB_CQ_INTR_ARMED1_RST 0x0
> -union dlb_chp_ldb_cq_intr_armed1 {
> -	struct {
> -		u32 armed : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_LDB_CQ_INTR_ARMED0 0xa8000064
> -#define DLB_CHP_LDB_CQ_INTR_ARMED0_RST 0x0
> -union dlb_chp_ldb_cq_intr_armed0 {
> -	struct {
> -		u32 armed : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_INTR_ARMED3 0xa8000024
> -#define DLB_CHP_DIR_CQ_INTR_ARMED3_RST 0x0
> -union dlb_chp_dir_cq_intr_armed3 {
> -	struct {
> -		u32 armed : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_INTR_ARMED2 0xa8000020
> -#define DLB_CHP_DIR_CQ_INTR_ARMED2_RST 0x0
> -union dlb_chp_dir_cq_intr_armed2 {
> -	struct {
> -		u32 armed : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_INTR_ARMED1 0xa800001c
> -#define DLB_CHP_DIR_CQ_INTR_ARMED1_RST 0x0
> -union dlb_chp_dir_cq_intr_armed1 {
> -	struct {
> -		u32 armed : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CHP_DIR_CQ_INTR_ARMED0 0xa8000018
> -#define DLB_CHP_DIR_CQ_INTR_ARMED0_RST 0x0
> -union dlb_chp_dir_cq_intr_armed0 {
> -	struct {
> -		u32 armed : 32;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CFG_MSTR_DIAG_RESET_STS 0xb8000004
> -#define DLB_CFG_MSTR_DIAG_RESET_STS_RST 0x1ff
> -union dlb_cfg_mstr_diag_reset_sts {
> -	struct {
> -		u32 chp_pf_reset_done : 1;
> -		u32 rop_pf_reset_done : 1;
> -		u32 lsp_pf_reset_done : 1;
> -		u32 nalb_pf_reset_done : 1;
> -		u32 ap_pf_reset_done : 1;
> -		u32 dp_pf_reset_done : 1;
> -		u32 qed_pf_reset_done : 1;
> -		u32 dqed_pf_reset_done : 1;
> -		u32 aqed_pf_reset_done : 1;
> -		u32 rsvd1 : 6;
> -		u32 pf_reset_active : 1;
> -		u32 chp_vf_reset_done : 1;
> -		u32 rop_vf_reset_done : 1;
> -		u32 lsp_vf_reset_done : 1;
> -		u32 nalb_vf_reset_done : 1;
> -		u32 ap_vf_reset_done : 1;
> -		u32 dp_vf_reset_done : 1;
> -		u32 qed_vf_reset_done : 1;
> -		u32 dqed_vf_reset_done : 1;
> -		u32 aqed_vf_reset_done : 1;
> -		u32 rsvd0 : 6;
> -		u32 vf_reset_active : 1;
> -	} field;
> -	u32 val;
> -};
> -
> -#define DLB_CFG_MSTR_BCAST_RESET_VF_START 0xc8100000
> -#define DLB_CFG_MSTR_BCAST_RESET_VF_START_RST 0x0
> -/* HW Reset Types */
> -#define VF_RST_TYPE_CQ_LDB   0
> -#define VF_RST_TYPE_QID_LDB  1
> -#define VF_RST_TYPE_POOL_LDB 2
> -#define VF_RST_TYPE_CQ_DIR   8
> -#define VF_RST_TYPE_QID_DIR  9
> -#define VF_RST_TYPE_POOL_DIR 10
> -union dlb_cfg_mstr_bcast_reset_vf_start {
> -	struct {
> -		u32 vf_reset_start : 1;
> -		u32 reserved : 3;
> -		u32 vf_reset_type : 4;
> -		u32 vf_reset_id : 24;
> -	} field;
> -	u32 val;
> -};
> -
> -#endif /* __DLB_REGS_H */
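The register definitions removed above all follow one pattern: an address macro, a reset-value macro, and a union that overlays named bit-fields on the raw 32-bit CSR value. Below is a minimal standalone sketch of that pattern; it uses a hypothetical toy_cq_dsbl union modelled on the removed dlb_lsp_cq_ldb_dsbl rather than the removed headers, so it compiles on its own.

    /* Standalone illustration only, not part of the patch. Shows the
     * bit-field/raw-value union pattern used by every register
     * definition removed above: named fields for encoding, a single
     * u32 for the actual CSR access.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    union toy_cq_dsbl {             /* models union dlb_lsp_cq_ldb_dsbl */
        struct {
            u32 disabled : 1;
            u32 rsvd0 : 31;
        } field;
        u32 val;
    };

    int main(void)
    {
        union toy_cq_dsbl reg;

        reg.val = 0x1;              /* reset value: CQ starts disabled */
        reg.field.disabled = 0;     /* flip the named field to enable the CQ */
        printf("raw CSR value to write: 0x%x\n", (unsigned)reg.val);
        return 0;
    }

The removed driver code manipulates the real registers the same way: set the named field, then write reg.val through the CSR accessor at the per-port address macro.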
> diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
> deleted file mode 100644
> index 4984de5d3..000000000
> --- a/drivers/event/dlb/pf/base/dlb_resource.c
> +++ /dev/null
> @@ -1,6904 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#include "dlb_hw_types.h"
> -#include "../../dlb_user.h"
> -#include "dlb_resource.h"
> -#include "dlb_osdep.h"
> -#include "dlb_osdep_bitmap.h"
> -#include "dlb_osdep_types.h"
> -#include "dlb_regs.h"
> -#include "../../dlb_priv.h"
> -#include "../../dlb_inline_fns.h"
> -
> -#define DLB_DOM_LIST_HEAD(head, type) \
> -	DLB_LIST_HEAD((head), type, domain_list)
> -
> -#define DLB_FUNC_LIST_HEAD(head, type) \
> -	DLB_LIST_HEAD((head), type, func_list)
> -
> -#define DLB_DOM_LIST_FOR(head, ptr, iter) \
> -	DLB_LIST_FOR_EACH(head, ptr, domain_list, iter)
> -
> -#define DLB_FUNC_LIST_FOR(head, ptr, iter) \
> -	DLB_LIST_FOR_EACH(head, ptr, func_list, iter)
> -
> -#define DLB_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
> -	DLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
> -
> -#define DLB_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
> -	DLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
> -
> -static inline void dlb_flush_csr(struct dlb_hw *hw)
> -{
> -	DLB_CSR_RD(hw, DLB_SYS_TOTAL_VAS);
> -}
> -
> -static void dlb_init_fn_rsrc_lists(struct dlb_function_resources *rsrc)
> -{
> -	dlb_list_init_head(&rsrc->avail_domains);
> -	dlb_list_init_head(&rsrc->used_domains);
> -	dlb_list_init_head(&rsrc->avail_ldb_queues);
> -	dlb_list_init_head(&rsrc->avail_ldb_ports);
> -	dlb_list_init_head(&rsrc->avail_dir_pq_pairs);
> -	dlb_list_init_head(&rsrc->avail_ldb_credit_pools);
> -	dlb_list_init_head(&rsrc->avail_dir_credit_pools);
> -}
> -
> -static void dlb_init_domain_rsrc_lists(struct dlb_domain *domain)
> -{
> -	dlb_list_init_head(&domain->used_ldb_queues);
> -	dlb_list_init_head(&domain->used_ldb_ports);
> -	dlb_list_init_head(&domain->used_dir_pq_pairs);
> -	dlb_list_init_head(&domain->used_ldb_credit_pools);
> -	dlb_list_init_head(&domain->used_dir_credit_pools);
> -	dlb_list_init_head(&domain->avail_ldb_queues);
> -	dlb_list_init_head(&domain->avail_ldb_ports);
> -	dlb_list_init_head(&domain->avail_dir_pq_pairs);
> -	dlb_list_init_head(&domain->avail_ldb_credit_pools);
> -	dlb_list_init_head(&domain->avail_dir_credit_pools);
> -}
> -
> -int dlb_resource_init(struct dlb_hw *hw)
> -{
> -	struct dlb_list_entry *list;
> -	unsigned int i;
> -
> -	/* For optimal load-balancing, ports that map to one or more QIDs in
> -	 * common should not be in numerical sequence. This is application
> -	 * dependent, but the driver interleaves port IDs as much as possible
> -	 * to reduce the likelihood of this. This initial allocation maximizes
> -	 * the average distance between an ID and its immediate neighbors (i.e.
> -	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
> -	 * 3, etc.).
> -	 */
> -	u32 init_ldb_port_allocation[DLB_MAX_NUM_LDB_PORTS] = {
> -		0,  31, 62, 29, 60, 27, 58, 25, 56, 23, 54, 21, 52, 19, 50, 17,
> -		48, 15, 46, 13, 44, 11, 42,  9, 40,  7, 38,  5, 36,  3, 34, 1,
> -		32, 63, 30, 61, 28, 59, 26, 57, 24, 55, 22, 53, 20, 51, 18, 49,
> -		16, 47, 14, 45, 12, 43, 10, 41,  8, 39,  6, 37,  4, 35,  2, 33
> -	};
> -
> -	/* Zero-out resource tracking data structures */
> -	memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
> -	memset(&hw->pf, 0, sizeof(hw->pf));
> -
> -	dlb_init_fn_rsrc_lists(&hw->pf);
> -
> -	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
> -		memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
> -		dlb_init_domain_rsrc_lists(&hw->domains[i]);
> -		hw->domains[i].parent_func = &hw->pf;
> -	}
> -
> -	/* Give all resources to the PF driver */
> -	hw->pf.num_avail_domains = DLB_MAX_NUM_DOMAINS;
> -	for (i = 0; i < hw->pf.num_avail_domains; i++) {
> -		list = &hw->domains[i].func_list;
> -
> -		dlb_list_add(&hw->pf.avail_domains, list);
> -	}
> -
> -	hw->pf.num_avail_ldb_queues = DLB_MAX_NUM_LDB_QUEUES;
> -	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
> -		list = &hw->rsrcs.ldb_queues[i].func_list;
> -
> -		dlb_list_add(&hw->pf.avail_ldb_queues, list);
> -	}
> -
> -	hw->pf.num_avail_ldb_ports = DLB_MAX_NUM_LDB_PORTS;
> -	for (i = 0; i < hw->pf.num_avail_ldb_ports; i++) {
> -		struct dlb_ldb_port *port;
> -
> -		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
> -
> -		dlb_list_add(&hw->pf.avail_ldb_ports, &port->func_list);
> -	}
> -
> -	hw->pf.num_avail_dir_pq_pairs = DLB_MAX_NUM_DIR_PORTS;
> -	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
> -		list = &hw->rsrcs.dir_pq_pairs[i].func_list;
> -
> -		dlb_list_add(&hw->pf.avail_dir_pq_pairs, list);
> -	}
> -
> -	hw->pf.num_avail_ldb_credit_pools = DLB_MAX_NUM_LDB_CREDIT_POOLS;
> -	for (i = 0; i < hw->pf.num_avail_ldb_credit_pools; i++) {
> -		list = &hw->rsrcs.ldb_credit_pools[i].func_list;
> -
> -		dlb_list_add(&hw->pf.avail_ldb_credit_pools, list);
> -	}
> -
> -	hw->pf.num_avail_dir_credit_pools = DLB_MAX_NUM_DIR_CREDIT_POOLS;
> -	for (i = 0; i < hw->pf.num_avail_dir_credit_pools; i++) {
> -		list = &hw->rsrcs.dir_credit_pools[i].func_list;
> -
> -		dlb_list_add(&hw->pf.avail_dir_credit_pools, list);
> -	}
> -
> -	/* There are 5120 history list entries, which allows us to overprovision
> -	 * the inflight limit (4096) by 1k.
> -	 */
> -	if (dlb_bitmap_alloc(hw,
> -			     &hw->pf.avail_hist_list_entries,
> -			     DLB_MAX_NUM_HIST_LIST_ENTRIES))
> -		return -1;
> -
> -	if (dlb_bitmap_fill(hw->pf.avail_hist_list_entries))
> -		return -1;
> -
> -	if (dlb_bitmap_alloc(hw,
> -			     &hw->pf.avail_qed_freelist_entries,
> -			     DLB_MAX_NUM_LDB_CREDITS))
> -		return -1;
> -
> -	if (dlb_bitmap_fill(hw->pf.avail_qed_freelist_entries))
> -		return -1;
> -
> -	if (dlb_bitmap_alloc(hw,
> -			     &hw->pf.avail_dqed_freelist_entries,
> -			     DLB_MAX_NUM_DIR_CREDITS))
> -		return -1;
> -
> -	if (dlb_bitmap_fill(hw->pf.avail_dqed_freelist_entries))
> -		return -1;
> -
> -	if (dlb_bitmap_alloc(hw,
> -			     &hw->pf.avail_aqed_freelist_entries,
> -			     DLB_MAX_NUM_AQOS_ENTRIES))
> -		return -1;
> -
> -	if (dlb_bitmap_fill(hw->pf.avail_aqed_freelist_entries))
> -		return -1;
> -
> -	/* Initialize the hardware resource IDs */
> -	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++)
> -		hw->domains[i].id = i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_LDB_QUEUES; i++)
> -		hw->rsrcs.ldb_queues[i].id = i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
> -		hw->rsrcs.ldb_ports[i].id = i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
> -		hw->rsrcs.dir_pq_pairs[i].id = i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_LDB_CREDIT_POOLS; i++)
> -		hw->rsrcs.ldb_credit_pools[i].id = i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_DIR_CREDIT_POOLS; i++)
> -		hw->rsrcs.dir_credit_pools[i].id = i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
> -		hw->rsrcs.sn_groups[i].id = i;
> -		/* Default mode (0) is 32 sequence numbers per queue */
> -		hw->rsrcs.sn_groups[i].mode = 0;
> -		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 32;
> -		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
> -	}
> -
> -	return 0;
> -}
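The init_ldb_port_allocation[] table above implements the interleaving described in its comment: IDs handed out back to back are far apart numerically, so sequentially allocated ports rarely sit next to each other in hardware. The following standalone sketch checks that property; the (i * 31) % 64 generator is an assumption that merely appears to reproduce the removed table and is not taken from the patch.

    /* Standalone illustration only, not part of the patch. Computes the
     * minimum circular distance between IDs allocated back to back by an
     * interleaved order like init_ldb_port_allocation[].
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_PORTS 64

    static int circ_dist(int a, int b)
    {
        int d = abs(a - b);

        return d < NUM_PORTS - d ? d : NUM_PORTS - d;
    }

    int main(void)
    {
        int order[NUM_PORTS];
        int i, min_dist = NUM_PORTS;

        /* Hypothetical generator: appears to yield 0, 31, 62, 29, 60, ... */
        for (i = 0; i < NUM_PORTS; i++)
            order[i] = (i * 31) % NUM_PORTS;

        for (i = 1; i < NUM_PORTS; i++)
            if (circ_dist(order[i - 1], order[i]) < min_dist)
                min_dist = circ_dist(order[i - 1], order[i]);

        printf("minimum distance between consecutive IDs: %d\n", min_dist);
        return 0;
    }

For this ordering every back-to-back pair ends up 31 IDs apart, close to the maximum possible circular distance of 32 on 64 ports, which is what the removed comment is aiming at.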
> -
> -void dlb_resource_free(struct dlb_hw *hw)
> -{
> -	dlb_bitmap_free(hw->pf.avail_hist_list_entries);
> -
> -	dlb_bitmap_free(hw->pf.avail_qed_freelist_entries);
> -
> -	dlb_bitmap_free(hw->pf.avail_dqed_freelist_entries);
> -
> -	dlb_bitmap_free(hw->pf.avail_aqed_freelist_entries);
> -}
> -
> -static struct dlb_domain *dlb_get_domain_from_id(struct dlb_hw *hw, u32 id)
> -{
> -	if (id >= DLB_MAX_NUM_DOMAINS)
> -		return NULL;
> -
> -	return &hw->domains[id];
> -}
> -
> -static int dlb_attach_ldb_queues(struct dlb_hw *hw,
> -				 struct dlb_function_resources *rsrcs,
> -				 struct dlb_domain *domain,
> -				 u32 num_queues,
> -				 struct dlb_cmd_response *resp)
> -{
> -	unsigned int i, j;
> -
> -	if (rsrcs->num_avail_ldb_queues < num_queues) {
> -		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	for (i = 0; i < num_queues; i++) {
> -		struct dlb_ldb_queue *queue;
> -
> -		queue = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
> -					   typeof(*queue));
> -		if (queue == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: domain validation failed\n",
> -				   __func__);
> -			goto cleanup;
> -		}
> -
> -		dlb_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
> -
> -		queue->domain_id = domain->id;
> -		queue->owned = true;
> -
> -		dlb_list_add(&domain->avail_ldb_queues, &queue->domain_list);
> -	}
> -
> -	rsrcs->num_avail_ldb_queues -= num_queues;
> -
> -	return 0;
> -
> -cleanup:
> -
> -	/* Return the assigned queues */
> -	for (j = 0; j < i; j++) {
> -		struct dlb_ldb_queue *queue;
> -
> -		queue = DLB_FUNC_LIST_HEAD(domain->avail_ldb_queues,
> -					   typeof(*queue));
> -		/* Unrecoverable internal error */
> -		if (queue == NULL)
> -			break;
> -
> -		queue->owned = false;
> -
> -		dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);
> -
> -		dlb_list_add(&rsrcs->avail_ldb_queues, &queue->func_list);
> -	}
> -
> -	return -EFAULT;
> -}
> -
> -static struct dlb_ldb_port *
> -dlb_get_next_ldb_port(struct dlb_hw *hw,
> -		      struct dlb_function_resources *rsrcs,
> -		      u32 domain_id)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -
> -	/* To reduce the odds of consecutive load-balanced ports mapping to the
> -	 * same queue(s), the driver attempts to allocate ports whose neighbors
> -	 * are owned by a different domain.
> -	 */
> -	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
> -		u32 next, prev;
> -		u32 phys_id;
> -
> -		phys_id = port->id;
> -		next = phys_id + 1;
> -		prev = phys_id - 1;
> -
> -		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
> -			next = 0;
> -		if (phys_id == 0)
> -			prev = DLB_MAX_NUM_LDB_PORTS - 1;
> -
> -		if (!hw->rsrcs.ldb_ports[next].owned ||
> -		    hw->rsrcs.ldb_ports[next].domain_id == domain_id)
> -			continue;
> -
> -		if (!hw->rsrcs.ldb_ports[prev].owned ||
> -		    hw->rsrcs.ldb_ports[prev].domain_id == domain_id)
> -			continue;
> -
> -		return port;
> -	}
> -
> -	/* Failing that, the driver looks for a port with one neighbor owned by
> -	 * a different domain and the other unallocated.
> -	 */
> -	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
> -		u32 next, prev;
> -		u32 phys_id;
> -
> -		phys_id = port->id;
> -		next = phys_id + 1;
> -		prev = phys_id - 1;
> -
> -		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
> -			next = 0;
> -		if (phys_id == 0)
> -			prev = DLB_MAX_NUM_LDB_PORTS - 1;
> -
> -		if (!hw->rsrcs.ldb_ports[prev].owned &&
> -		    hw->rsrcs.ldb_ports[next].owned &&
> -		    hw->rsrcs.ldb_ports[next].domain_id != domain_id)
> -			return port;
> -
> -		if (!hw->rsrcs.ldb_ports[next].owned &&
> -		    hw->rsrcs.ldb_ports[prev].owned &&
> -		    hw->rsrcs.ldb_ports[prev].domain_id != domain_id)
> -			return port;
> -	}
> -
> -	/* Failing that, the driver looks for a port with both neighbors
> -	 * unallocated.
> -	 */
> -	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
> -		u32 next, prev;
> -		u32 phys_id;
> -
> -		phys_id = port->id;
> -		next = phys_id + 1;
> -		prev = phys_id - 1;
> -
> -		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
> -			next = 0;
> -		if (phys_id == 0)
> -			prev = DLB_MAX_NUM_LDB_PORTS - 1;
> -
> -		if (!hw->rsrcs.ldb_ports[prev].owned &&
> -		    !hw->rsrcs.ldb_ports[next].owned)
> -			return port;
> -	}
> -
> -	/* If all else fails, the driver returns the next available port. */
> -	return DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports, typeof(*port));
> -}
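dlb_get_next_ldb_port() above makes three passes with progressively weaker criteria: both neighbours owned by another domain, then one such neighbour with the other free, then both neighbours free, before falling back to the first available port. Below is a compact sketch of the same preference order over a plain array; toy_pick_port() and struct toy_port are illustrative assumptions, not driver API.

    /* Illustrative sketch only, not part of the patch. Mirrors the
     * three-pass neighbour preference of dlb_get_next_ldb_port() using a
     * byte-per-port (owned, domain) array instead of resource lists.
     */
    #define NPORTS 64

    struct toy_port {
        int owned;
        unsigned int domain;
    };

    static int toy_pick_port(const struct toy_port p[NPORTS], unsigned int domain)
    {
        int pass, i;

        for (pass = 0; pass < 3; pass++) {
            for (i = 0; i < NPORTS; i++) {
                int next = (i + 1) % NPORTS;
                int prev = (i + NPORTS - 1) % NPORTS;
                int n_other = p[next].owned && p[next].domain != domain;
                int p_other = p[prev].owned && p[prev].domain != domain;
                int n_free = !p[next].owned;
                int p_free = !p[prev].owned;

                if (p[i].owned)
                    continue;   /* only consider unallocated ports */
                /* pass 0: both neighbours owned by another domain */
                if (pass == 0 && n_other && p_other)
                    return i;
                /* pass 1: one neighbour owned by another domain, one free */
                if (pass == 1 && ((n_other && p_free) || (p_other && n_free)))
                    return i;
                /* pass 2: both neighbours free */
                if (pass == 2 && n_free && p_free)
                    return i;
            }
        }

        /* fall back to the first free port */
        for (i = 0; i < NPORTS; i++)
            if (!p[i].owned)
                return i;
        return -1;
    }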
> -
> -static int dlb_attach_ldb_ports(struct dlb_hw *hw,
> -				struct dlb_function_resources *rsrcs,
> -				struct dlb_domain *domain,
> -				u32 num_ports,
> -				struct dlb_cmd_response *resp)
> -{
> -	unsigned int i, j;
> -
> -	if (rsrcs->num_avail_ldb_ports < num_ports) {
> -		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	for (i = 0; i < num_ports; i++) {
> -		struct dlb_ldb_port *port;
> -
> -		port = dlb_get_next_ldb_port(hw, rsrcs, domain->id);
> -
> -		if (port == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: domain validation failed\n",
> -				   __func__);
> -			goto cleanup;
> -		}
> -
> -		dlb_list_del(&rsrcs->avail_ldb_ports, &port->func_list);
> -
> -		port->domain_id = domain->id;
> -		port->owned = true;
> -
> -		dlb_list_add(&domain->avail_ldb_ports, &port->domain_list);
> -	}
> -
> -	rsrcs->num_avail_ldb_ports -= num_ports;
> -
> -	return 0;
> -
> -cleanup:
> -
> -	/* Return the assigned ports */
> -	for (j = 0; j < i; j++) {
> -		struct dlb_ldb_port *port;
> -
> -		port = DLB_FUNC_LIST_HEAD(domain->avail_ldb_ports,
> -					  typeof(*port));
> -		/* Unrecoverable internal error */
> -		if (port == NULL)
> -			break;
> -
> -		port->owned = false;
> -
> -		dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);
> -
> -		dlb_list_add(&rsrcs->avail_ldb_ports, &port->func_list);
> -	}
> -
> -	return -EFAULT;
> -}
> -
> -static int dlb_attach_dir_ports(struct dlb_hw *hw,
> -				struct dlb_function_resources *rsrcs,
> -				struct dlb_domain *domain,
> -				u32 num_ports,
> -				struct dlb_cmd_response *resp)
> -{
> -	unsigned int i, j;
> -
> -	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
> -		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	for (i = 0; i < num_ports; i++) {
> -		struct dlb_dir_pq_pair *port;
> -
> -		port = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
> -					  typeof(*port));
> -		if (port == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: domain validation failed\n",
> -				   __func__);
> -			goto cleanup;
> -		}
> -
> -		dlb_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
> -
> -		port->domain_id = domain->id;
> -		port->owned = true;
> -
> -		dlb_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
> -	}
> -
> -	rsrcs->num_avail_dir_pq_pairs -= num_ports;
> -
> -	return 0;
> -
> -cleanup:
> -
> -	/* Return the assigned ports */
> -	for (j = 0; j < i; j++) {
> -		struct dlb_dir_pq_pair *port;
> -
> -		port = DLB_FUNC_LIST_HEAD(domain->avail_dir_pq_pairs,
> -					  typeof(*port));
> -		/* Unrecoverable internal error */
> -		if (port == NULL)
> -			break;
> -
> -		port->owned = false;
> -
> -		dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
> -
> -		dlb_list_add(&rsrcs->avail_dir_pq_pairs, &port->func_list);
> -	}
> -
> -	return -EFAULT;
> -}
> -
> -static int dlb_attach_ldb_credits(struct dlb_function_resources *rsrcs,
> -				  struct dlb_domain *domain,
> -				  u32 num_credits,
> -				  struct dlb_cmd_response *resp)
> -{
> -	struct dlb_bitmap *bitmap = rsrcs->avail_qed_freelist_entries;
> -
> -	if (dlb_bitmap_count(bitmap) < (int)num_credits) {
> -		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	if (num_credits) {
> -		int base;
> -
> -		base = dlb_bitmap_find_set_bit_range(bitmap, num_credits);
> -		if (base < 0)
> -			goto error;
> -
> -		domain->qed_freelist.base = base;
> -		domain->qed_freelist.bound = base + num_credits;
> -		domain->qed_freelist.offset = 0;
> -
> -		dlb_bitmap_clear_range(bitmap, base, num_credits);
> -	}
> -
> -	return 0;
> -
> -error:
> -	resp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;
> -	return -1;
> -}
> -
> -static int dlb_attach_dir_credits(struct dlb_function_resources *rsrcs,
> -				  struct dlb_domain *domain,
> -				  u32 num_credits,
> -				  struct dlb_cmd_response *resp)
> -{
> -	struct dlb_bitmap *bitmap = rsrcs->avail_dqed_freelist_entries;
> -
> -	if (dlb_bitmap_count(bitmap) < (int)num_credits) {
> -		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	if (num_credits) {
> -		int base;
> -
> -		base = dlb_bitmap_find_set_bit_range(bitmap, num_credits);
> -		if (base < 0)
> -			goto error;
> -
> -		domain->dqed_freelist.base = base;
> -		domain->dqed_freelist.bound = base + num_credits;
> -		domain->dqed_freelist.offset = 0;
> -
> -		dlb_bitmap_clear_range(bitmap, base, num_credits);
> -	}
> -
> -	return 0;
> -
> -error:
> -	resp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;
> -	return -1;
> -}
> -
> -static int dlb_attach_ldb_credit_pools(struct dlb_hw *hw,
> -				       struct dlb_function_resources *rsrcs,
> -				       struct dlb_domain *domain,
> -				       u32 num_credit_pools,
> -				       struct dlb_cmd_response *resp)
> -{
> -	unsigned int i, j;
> -
> -	if (rsrcs->num_avail_ldb_credit_pools < num_credit_pools) {
> -		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	for (i = 0; i < num_credit_pools; i++) {
> -		struct dlb_credit_pool *pool;
> -
> -		pool = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_credit_pools,
> -					  typeof(*pool));
> -		if (pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: domain validation failed\n",
> -				   __func__);
> -			goto cleanup;
> -		}
> -
> -		dlb_list_del(&rsrcs->avail_ldb_credit_pools,
> -			     &pool->func_list);
> -
> -		pool->domain_id = domain->id;
> -		pool->owned = true;
> -
> -		dlb_list_add(&domain->avail_ldb_credit_pools,
> -			     &pool->domain_list);
> -	}
> -
> -	rsrcs->num_avail_ldb_credit_pools -= num_credit_pools;
> -
> -	return 0;
> -
> -cleanup:
> -
> -	/* Return the assigned credit pools */
> -	for (j = 0; j < i; j++) {
> -		struct dlb_credit_pool *pool;
> -
> -		pool = DLB_FUNC_LIST_HEAD(domain->avail_ldb_credit_pools,
> -					  typeof(*pool));
> -		/* Unrecoverable internal error */
> -		if (pool == NULL)
> -			break;
> -
> -		pool->owned = false;
> -
> -		dlb_list_del(&domain->avail_ldb_credit_pools,
> -			     &pool->domain_list);
> -
> -		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
> -			     &pool->func_list);
> -	}
> -
> -	return -EFAULT;
> -}
> -
> -static int dlb_attach_dir_credit_pools(struct dlb_hw *hw,
> -				       struct dlb_function_resources *rsrcs,
> -				       struct dlb_domain *domain,
> -				       u32 num_credit_pools,
> -				       struct dlb_cmd_response *resp)
> -{
> -	unsigned int i, j;
> -
> -	if (rsrcs->num_avail_dir_credit_pools < num_credit_pools) {
> -		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	for (i = 0; i < num_credit_pools; i++) {
> -		struct dlb_credit_pool *pool;
> -
> -		pool = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_credit_pools,
> -					  typeof(*pool));
> -		if (pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: domain validation failed\n",
> -				   __func__);
> -			goto cleanup;
> -		}
> -
> -		dlb_list_del(&rsrcs->avail_dir_credit_pools,
> -			     &pool->func_list);
> -
> -		pool->domain_id = domain->id;
> -		pool->owned = true;
> -
> -		dlb_list_add(&domain->avail_dir_credit_pools,
> -			     &pool->domain_list);
> -	}
> -
> -	rsrcs->num_avail_dir_credit_pools -= num_credit_pools;
> -
> -	return 0;
> -
> -cleanup:
> -
> -	/* Return the assigned credit pools */
> -	for (j = 0; j < i; j++) {
> -		struct dlb_credit_pool *pool;
> -
> -		pool = DLB_FUNC_LIST_HEAD(domain->avail_dir_credit_pools,
> -					  typeof(*pool));
> -		/* Unrecoverable internal error */
> -		if (pool == NULL)
> -			break;
> -
> -		pool->owned = false;
> -
> -		dlb_list_del(&domain->avail_dir_credit_pools,
> -			     &pool->domain_list);
> -
> -		dlb_list_add(&rsrcs->avail_dir_credit_pools,
> -			     &pool->func_list);
> -	}
> -
> -	return -EFAULT;
> -}
> -
> -static int
> -dlb_attach_domain_hist_list_entries(struct dlb_function_resources *rsrcs,
> -				    struct dlb_domain *domain,
> -				    u32 num_hist_list_entries,
> -				    struct dlb_cmd_response *resp)
> -{
> -	struct dlb_bitmap *bitmap;
> -	int base;
> -
> -	if (num_hist_list_entries) {
> -		bitmap = rsrcs->avail_hist_list_entries;
> -
> -		base = dlb_bitmap_find_set_bit_range(bitmap,
> -						     num_hist_list_entries);
> -		if (base < 0)
> -			goto error;
> -
> -		domain->total_hist_list_entries = num_hist_list_entries;
> -		domain->avail_hist_list_entries = num_hist_list_entries;
> -		domain->hist_list_entry_base = base;
> -		domain->hist_list_entry_offset = 0;
> -
> -		dlb_bitmap_clear_range(bitmap, base, num_hist_list_entries);
> -	}
> -	return 0;
> -
> -error:
> -	resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
> -	return -1;
> -}
> -
> -static int dlb_attach_atomic_inflights(struct dlb_function_resources *rsrcs,
> -				       struct dlb_domain *domain,
> -				       u32 num_atomic_inflights,
> -				       struct dlb_cmd_response *resp)
> -{
> -	if (num_atomic_inflights) {
> -		struct dlb_bitmap *bitmap =
> -			rsrcs->avail_aqed_freelist_entries;
> -		int base;
> -
> -		base = dlb_bitmap_find_set_bit_range(bitmap,
> -						     num_atomic_inflights);
> -		if (base < 0)
> -			goto error;
> -
> -		domain->aqed_freelist.base = base;
> -		domain->aqed_freelist.bound = base + num_atomic_inflights;
> -		domain->aqed_freelist.offset = 0;
> -
> -		dlb_bitmap_clear_range(bitmap, base, num_atomic_inflights);
> -	}
> -
> -	return 0;
> -
> -error:
> -	resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
> -	return -1;
> -}
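dlb_attach_ldb_credits(), dlb_attach_dir_credits(), dlb_attach_domain_hist_list_entries() and dlb_attach_atomic_inflights() above all use the same carve-out idiom: find a contiguous run of set bits in the function-wide bitmap, record [base, base + num) in the domain, then clear the run so it cannot be handed out twice. Here is a sketch of that idiom over a byte-per-entry availability array instead of the removed dlb_bitmap helpers; toy_carve_range() is a hypothetical name.

    /* Illustrative sketch only, not part of the patch. Finds a contiguous
     * run of free entries, reserves it, and returns its base index, or -1
     * when no run is long enough.
     */
    static int toy_carve_range(unsigned char *avail, int total, int num)
    {
        int base, i, run = 0;

        if (num == 0)
            return 0;       /* nothing to reserve */

        for (base = 0, i = 0; i < total; i++) {
            if (avail[i]) {
                run++;
            } else {
                run = 0;
                base = i + 1;
            }
            if (run == num) {
                /* reserve [base, base + num) */
                for (i = base; i < base + num; i++)
                    avail[i] = 0;
                return base;
            }
        }

        return -1;          /* no contiguous run large enough */
    }

The -1 return corresponds to the *_UNAVAILABLE error paths in the removed helpers above, which set resp->status before failing the domain creation.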
> -
> -
> -static int
> -dlb_domain_attach_resources(struct dlb_hw *hw,
> -			    struct dlb_function_resources *rsrcs,
> -			    struct dlb_domain *domain,
> -			    struct dlb_create_sched_domain_args *args,
> -			    struct dlb_cmd_response *resp)
> -{
> -	int ret;
> -
> -	ret = dlb_attach_ldb_queues(hw,
> -				    rsrcs,
> -				    domain,
> -				    args->num_ldb_queues,
> -				    resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_attach_ldb_ports(hw,
> -				   rsrcs,
> -				   domain,
> -				   args->num_ldb_ports,
> -				   resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_attach_dir_ports(hw,
> -				   rsrcs,
> -				   domain,
> -				   args->num_dir_ports,
> -				   resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_attach_ldb_credits(rsrcs,
> -				     domain,
> -				     args->num_ldb_credits,
> -				     resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_attach_dir_credits(rsrcs,
> -				     domain,
> -				     args->num_dir_credits,
> -				     resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_attach_ldb_credit_pools(hw,
> -					  rsrcs,
> -					  domain,
> -					  args->num_ldb_credit_pools,
> -					  resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_attach_dir_credit_pools(hw,
> -					  rsrcs,
> -					  domain,
> -					  args->num_dir_credit_pools,
> -					  resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_attach_domain_hist_list_entries(rsrcs,
> -						  domain,
> -						  args->num_hist_list_entries,
> -						  resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_attach_atomic_inflights(rsrcs,
> -					  domain,
> -					  args->num_atomic_inflights,
> -					  resp);
> -	if (ret < 0)
> -		return ret;
> -
> -	domain->configured = true;
> -
> -	domain->started = false;
> -
> -	rsrcs->num_avail_domains--;
> -
> -	return 0;
> -}
> -
> -static void dlb_ldb_port_cq_enable(struct dlb_hw *hw,
> -				   struct dlb_ldb_port *port)
> -{
> -	union dlb_lsp_cq_ldb_dsbl reg;
> -
> -	/* Don't re-enable the port if a removal is pending. The caller should
> -	 * mark this port as enabled (if it isn't already), and when the
> -	 * removal completes the port will be enabled.
> -	 */
> -	if (port->num_pending_removals)
> -		return;
> -
> -	reg.field.disabled = 0;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);
> -
> -	dlb_flush_csr(hw);
> -}
> -
> -static void dlb_dir_port_cq_enable(struct dlb_hw *hw,
> -				   struct dlb_dir_pq_pair *port)
> -{
> -	union dlb_lsp_cq_dir_dsbl reg;
> -
> -	reg.field.disabled = 0;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);
> -
> -	dlb_flush_csr(hw);
> -}
> -
> -
> -static void dlb_ldb_port_cq_disable(struct dlb_hw *hw,
> -				    struct dlb_ldb_port *port)
> -{
> -	union dlb_lsp_cq_ldb_dsbl reg;
> -
> -	reg.field.disabled = 1;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);
> -
> -	dlb_flush_csr(hw);
> -}
> -
> -static void dlb_dir_port_cq_disable(struct dlb_hw *hw,
> -				    struct dlb_dir_pq_pair *port)
> -{
> -	union dlb_lsp_cq_dir_dsbl reg;
> -
> -	reg.field.disabled = 1;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);
> -
> -	dlb_flush_csr(hw);
> -}
> -
> -
> -
> -void dlb_disable_dp_vasr_feature(struct dlb_hw *hw)
> -{
> -	union dlb_dp_dir_csr_ctrl r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_DP_DIR_CSR_CTRL);
> -
> -	r0.field.cfg_vasr_dis = 1;
> -
> -	DLB_CSR_WR(hw, DLB_DP_DIR_CSR_CTRL, r0.val);
> -}
> -
> -void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw)
> -{
> -	union dlb_chp_cfg_chp_csr_ctrl r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_CHP_CFG_CHP_CSR_CTRL);
> -
> -	r0.val |= 1 << DLB_CHP_CFG_EXCESS_TOKENS_SHIFT;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_CFG_CHP_CSR_CTRL, r0.val);
> -}
> -
> -void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw)
> -{
> -	union dlb_sys_cq_mode r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
> -
> -	r0.field.ldb_cq64 = 1;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
> -}
> -
> -void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw)
> -{
> -	union dlb_sys_cq_mode r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
> -
> -	r0.field.dir_cq64 = 1;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
> -}
> -
> -void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw)
> -{
> -	union dlb_sys_sys_alarm_int_enable r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
> -
> -	r0.field.pf_to_vf_isr_pend_error = 0;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
> -}
> -
> -static unsigned int
> -dlb_get_num_ports_in_use(struct dlb_hw *hw)
> -{
> -	unsigned int i, n = 0;
> -
> -	for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
> -		if (hw->rsrcs.ldb_ports[i].owned)
> -			n++;
> -
> -	for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
> -		if (hw->rsrcs.dir_pq_pairs[i].owned)
> -			n++;
> -
> -	return n;
> -}
> -
> -static bool dlb_port_find_slot(struct dlb_ldb_port *port,
> -			       enum dlb_qid_map_state state,
> -			       int *slot)
> -{
> -	int i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
> -		if (port->qid_map[i].state == state)
> -			break;
> -	}
> -
> -	*slot = i;
> -
> -	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
> -}
> -
> -static bool dlb_port_find_slot_queue(struct dlb_ldb_port *port,
> -				     enum dlb_qid_map_state state,
> -				     struct dlb_ldb_queue *queue,
> -				     int *slot)
> -{
> -	int i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
> -		if (port->qid_map[i].state == state &&
> -		    port->qid_map[i].qid == queue->id)
> -			break;
> -	}
> -
> -	*slot = i;
> -
> -	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
> -}
> -
> -static int dlb_port_slot_state_transition(struct dlb_hw *hw,
> -					  struct dlb_ldb_port *port,
> -					  struct dlb_ldb_queue *queue,
> -					  int slot,
> -					  enum dlb_qid_map_state new_state)
> -{
> -	enum dlb_qid_map_state curr_state = port->qid_map[slot].state;
> -	struct dlb_domain *domain;
> -
> -	domain = dlb_get_domain_from_id(hw, port->domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: unable to find domain %d\n",
> -			   __func__, port->domain_id);
> -		return -EFAULT;
> -	}
> -
> -	switch (curr_state) {
> -	case DLB_QUEUE_UNMAPPED:
> -		switch (new_state) {
> -		case DLB_QUEUE_MAPPED:
> -			queue->num_mappings++;
> -			port->num_mappings++;
> -			break;
> -		case DLB_QUEUE_MAP_IN_PROGRESS:
> -			queue->num_pending_additions++;
> -			domain->num_pending_additions++;
> -			break;
> -		default:
> -			goto error;
> -		}
> -		break;
> -	case DLB_QUEUE_MAPPED:
> -		switch (new_state) {
> -		case DLB_QUEUE_UNMAPPED:
> -			queue->num_mappings--;
> -			port->num_mappings--;
> -			break;
> -		case DLB_QUEUE_UNMAP_IN_PROGRESS:
> -			port->num_pending_removals++;
> -			domain->num_pending_removals++;
> -			break;
> -		case DLB_QUEUE_MAPPED:
> -			/* Priority change, nothing to update */
> -			break;
> -		default:
> -			goto error;
> -		}
> -		break;
> -	case DLB_QUEUE_MAP_IN_PROGRESS:
> -		switch (new_state) {
> -		case DLB_QUEUE_UNMAPPED:
> -			queue->num_pending_additions--;
> -			domain->num_pending_additions--;
> -			break;
> -		case DLB_QUEUE_MAPPED:
> -			queue->num_mappings++;
> -			port->num_mappings++;
> -			queue->num_pending_additions--;
> -			domain->num_pending_additions--;
> -			break;
> -		default:
> -			goto error;
> -		}
> -		break;
> -	case DLB_QUEUE_UNMAP_IN_PROGRESS:
> -		switch (new_state) {
> -		case DLB_QUEUE_UNMAPPED:
> -			port->num_pending_removals--;
> -			domain->num_pending_removals--;
> -			queue->num_mappings--;
> -			port->num_mappings--;
> -			break;
> -		case DLB_QUEUE_MAPPED:
> -			port->num_pending_removals--;
> -			domain->num_pending_removals--;
> -			break;
> -		case DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:
> -			/* Nothing to update */
> -			break;
> -		default:
> -			goto error;
> -		}
> -		break;
> -	case DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:
> -		switch (new_state) {
> -		case DLB_QUEUE_UNMAP_IN_PROGRESS:
> -			/* Nothing to update */
> -			break;
> -		case DLB_QUEUE_UNMAPPED:
> -			/* An UNMAP_IN_PROGRESS_PENDING_MAP slot briefly
> -			 * becomes UNMAPPED before it transitions to
> -			 * MAP_IN_PROGRESS.
> -			 */
> -			queue->num_mappings--;
> -			port->num_mappings--;
> -			port->num_pending_removals--;
> -			domain->num_pending_removals--;
> -			break;
> -		default:
> -			goto error;
> -		}
> -		break;
> -	default:
> -		goto error;
> -	}
> -
> -	port->qid_map[slot].state = new_state;
> -
> -	DLB_HW_INFO(hw,
> -		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
> -		    __func__, queue->id, port->id, curr_state,
> -		    new_state);
> -	return 0;
> -
> -error:
> -	DLB_HW_ERR(hw,
> -		   "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
> -		   __func__, queue->id, port->id, curr_state,
> -		   new_state);
> -	return -EFAULT;
> -}
> -
> -/* dlb_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as their
> - * function names imply, and should only be called by the dynamic CQ mapping
> - * code.
> - */
> -static void dlb_ldb_queue_disable_mapped_cqs(struct dlb_hw *hw,
> -					     struct dlb_domain *domain,
> -					     struct dlb_ldb_queue *queue)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -	int slot;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;
> -
> -		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
> -			continue;
> -
> -		if (port->enabled)
> -			dlb_ldb_port_cq_disable(hw, port);
> -	}
> -}
> -
> -static void dlb_ldb_queue_enable_mapped_cqs(struct dlb_hw *hw,
> -					    struct dlb_domain *domain,
> -					    struct dlb_ldb_queue *queue)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -	int slot;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;
> -
> -		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
> -			continue;
> -
> -		if (port->enabled)
> -			dlb_ldb_port_cq_enable(hw, port);
> -	}
> -}
> -
> -static int dlb_ldb_port_map_qid_static(struct dlb_hw *hw,
> -				       struct dlb_ldb_port *p,
> -				       struct dlb_ldb_queue *q,
> -				       u8 priority)
> -{
> -	union dlb_lsp_cq2priov r0;
> -	union dlb_lsp_cq2qid r1;
> -	union dlb_atm_pipe_qid_ldb_qid2cqidx r2;
> -	union dlb_lsp_qid_ldb_qid2cqidx r3;
> -	union dlb_lsp_qid_ldb_qid2cqidx2 r4;
> -	enum dlb_qid_map_state state;
> -	int i;
> -
> -	/* Look for a pending or already mapped slot, else an unused slot */
> -	if (!dlb_port_find_slot_queue(p, DLB_QUEUE_MAP_IN_PROGRESS, q, &i) &&
> -	    !dlb_port_find_slot_queue(p, DLB_QUEUE_MAPPED, q, &i) &&
> -	    !dlb_port_find_slot(p, DLB_QUEUE_UNMAPPED, &i)) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: port slot tracking failed\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	/* Read-modify-write the priority and valid bit register */
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(p->id));
> -
> -	r0.field.v |= 1 << i;
> -	r0.field.prio |= (priority & 0x7) << i * 3;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(p->id), r0.val);
> -
> -	/* Read-modify-write the QID map register */
> -	r1.val = DLB_CSR_RD(hw, DLB_LSP_CQ2QID(p->id, i / 4));
> -
> -	if (i == 0 || i == 4)
> -		r1.field.qid_p0 = q->id;
> -	if (i == 1 || i == 5)
> -		r1.field.qid_p1 = q->id;
> -	if (i == 2 || i == 6)
> -		r1.field.qid_p2 = q->id;
> -	if (i == 3 || i == 7)
> -		r1.field.qid_p3 = q->id;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ2QID(p->id, i / 4), r1.val);
> -
> -	r2.val = DLB_CSR_RD(hw,
> -			    DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,
> -							   p->id / 4));
> -
> -	r3.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_LDB_QID2CQIDX(q->id,
> -						      p->id / 4));
> -
> -	r4.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_LDB_QID2CQIDX2(q->id,
> -						       p->id / 4));
> -
> -	switch (p->id % 4) {
> -	case 0:
> -		r2.field.cq_p0 |= 1 << i;
> -		r3.field.cq_p0 |= 1 << i;
> -		r4.field.cq_p0 |= 1 << i;
> -		break;
> -
> -	case 1:
> -		r2.field.cq_p1 |= 1 << i;
> -		r3.field.cq_p1 |= 1 << i;
> -		r4.field.cq_p1 |= 1 << i;
> -		break;
> -
> -	case 2:
> -		r2.field.cq_p2 |= 1 << i;
> -		r3.field.cq_p2 |= 1 << i;
> -		r4.field.cq_p2 |= 1 << i;
> -		break;
> -
> -	case 3:
> -		r2.field.cq_p3 |= 1 << i;
> -		r3.field.cq_p3 |= 1 << i;
> -		r4.field.cq_p3 |= 1 << i;
> -		break;
> -	}
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,
> -						  p->id / 4),
> -		   r2.val);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_QID_LDB_QID2CQIDX(q->id,
> -					     p->id / 4),
> -		   r3.val);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_QID_LDB_QID2CQIDX2(q->id,
> -					      p->id / 4),
> -		   r4.val);
> -
> -	dlb_flush_csr(hw);
> -
> -	p->qid_map[i].qid = q->id;
> -	p->qid_map[i].priority = priority;
> -
> -	state = DLB_QUEUE_MAPPED;
> -
> -	return dlb_port_slot_state_transition(hw, p, q, i, state);
> -}
> -
> -static int dlb_ldb_port_set_has_work_bits(struct dlb_hw *hw,
> -					  struct dlb_ldb_port *port,
> -					  struct dlb_ldb_queue *queue,
> -					  int slot)
> -{
> -	union dlb_lsp_qid_aqed_active_cnt r0;
> -	union dlb_lsp_qid_ldb_enqueue_cnt r1;
> -	union dlb_lsp_ldb_sched_ctrl r2 = { {0} };
> -
> -	/* Set the atomic scheduling haswork bit */
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
> -
> -	r2.field.cq = port->id;
> -	r2.field.qidix = slot;
> -	r2.field.value = 1;
> -	r2.field.rlist_haswork_v = r0.field.count > 0;
> -
> -	/* Set the non-atomic scheduling haswork bit */
> -	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
> -
> -	r1.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
> -
> -	memset(&r2, 0, sizeof(r2));
> -
> -	r2.field.cq = port->id;
> -	r2.field.qidix = slot;
> -	r2.field.value = 1;
> -	r2.field.nalb_haswork_v = (r1.field.count > 0);
> -
> -	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
> -
> -	dlb_flush_csr(hw);
> -
> -	return 0;
> -}
> -
> -static void dlb_ldb_port_clear_queue_if_status(struct dlb_hw *hw,
> -					       struct dlb_ldb_port *port,
> -					       int slot)
> -{
> -	union dlb_lsp_ldb_sched_ctrl r0 = { {0} };
> -
> -	r0.field.cq = port->id;
> -	r0.field.qidix = slot;
> -	r0.field.value = 0;
> -	r0.field.inflight_ok_v = 1;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);
> -
> -	dlb_flush_csr(hw);
> -}
> -
> -static void dlb_ldb_port_set_queue_if_status(struct dlb_hw *hw,
> -					     struct dlb_ldb_port *port,
> -					     int slot)
> -{
> -	union dlb_lsp_ldb_sched_ctrl r0 = { {0} };
> -
> -	r0.field.cq = port->id;
> -	r0.field.qidix = slot;
> -	r0.field.value = 1;
> -	r0.field.inflight_ok_v = 1;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);
> -
> -	dlb_flush_csr(hw);
> -}
> -
> -static void dlb_ldb_queue_set_inflight_limit(struct dlb_hw *hw,
> -					     struct dlb_ldb_queue *queue)
> -{
> -	union dlb_lsp_qid_ldb_infl_lim r0 = { {0} };
> -
> -	r0.field.limit = queue->num_qid_inflights;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r0.val);
> -}
> -
> -static void dlb_ldb_queue_clear_inflight_limit(struct dlb_hw *hw,
> -					       struct dlb_ldb_queue *queue)
> -{
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_QID_LDB_INFL_LIM(queue->id),
> -		   DLB_LSP_QID_LDB_INFL_LIM_RST);
> -}
> -
> -static int dlb_ldb_port_finish_map_qid_dynamic(struct dlb_hw *hw,
> -					       struct dlb_domain *domain,
> -					       struct dlb_ldb_port *port,
> -					       struct dlb_ldb_queue *queue)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_lsp_qid_ldb_infl_cnt r0;
> -	enum dlb_qid_map_state state;
> -	int slot, ret;
> -	u8 prio;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
> -
> -	if (r0.field.count) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: non-zero QID inflight count\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	/* For each port with a pending mapping to this queue, perform the
> -	 * static mapping and set the corresponding has_work bits.
> -	 */
> -	state = DLB_QUEUE_MAP_IN_PROGRESS;
> -	if (!dlb_port_find_slot_queue(port, state, queue, &slot))
> -		return -EINVAL;
> -
> -	if (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: port slot tracking failed\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	prio = port->qid_map[slot].priority;
> -
> -	/* Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
> -	 * the port's qid_map state.
> -	 */
> -	ret = dlb_ldb_port_map_qid_static(hw, port, queue, prio);
> -	if (ret)
> -		return ret;
> -
> -	ret = dlb_ldb_port_set_has_work_bits(hw, port, queue, slot);
> -	if (ret)
> -		return ret;
> -
> -	/* Ensure IF_status(cq,qid) is 0 before enabling the port to
> -	 * prevent spurious schedules to cause the queue's inflight
> -	 * count to increase.
> -	 */
> -	dlb_ldb_port_clear_queue_if_status(hw, port, slot);
> -
> -	/* Reset the queue's inflight status */
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		state = DLB_QUEUE_MAPPED;
> -		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
> -			continue;
> -
> -		dlb_ldb_port_set_queue_if_status(hw, port, slot);
> -	}
> -
> -	dlb_ldb_queue_set_inflight_limit(hw, queue);
> -
> -	/* Re-enable CQs mapped to this queue */
> -	dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
> -
> -	/* If this queue has other mappings pending, clear its inflight limit */
> -	if (queue->num_pending_additions > 0)
> -		dlb_ldb_queue_clear_inflight_limit(hw, queue);
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
> - * @hw: dlb_hw handle for a particular device.
> - * @port: load-balanced port
> - * @queue: load-balanced queue
> - * @priority: queue servicing priority
> - *
> - * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
> - * at a later point, and <0 if an error occurred.
> - */
> -static int dlb_ldb_port_map_qid_dynamic(struct dlb_hw *hw,
> -					struct dlb_ldb_port *port,
> -					struct dlb_ldb_queue *queue,
> -					u8 priority)
> -{
> -	union dlb_lsp_qid_ldb_infl_cnt r0 = { {0} };
> -	enum dlb_qid_map_state state;
> -	struct dlb_domain *domain;
> -	int slot, ret;
> -
> -	domain = dlb_get_domain_from_id(hw, port->domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: unable to find domain %d\n",
> -			   __func__, port->domain_id);
> -		return -EFAULT;
> -	}
> -
> -	/* Set the QID inflight limit to 0 to prevent further scheduling of the
> -	 * queue.
> -	 */
> -	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), 0);
> -
> -	if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &slot)) {
> -		DLB_HW_ERR(hw,
> -			   "Internal error: No available unmapped slots\n");
> -		return -EFAULT;
> -	}
> -
> -	if (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: port slot tracking failed\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	port->qid_map[slot].qid = queue->id;
> -	port->qid_map[slot].priority = priority;
> -
> -	state = DLB_QUEUE_MAP_IN_PROGRESS;
> -	ret = dlb_port_slot_state_transition(hw, port, queue, slot, state);
> -	if (ret)
> -		return ret;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
> -
> -	if (r0.field.count) {
> -		/* The queue is owed completions so it's not safe to map it
> -		 * yet. Schedule a kernel thread to complete the mapping later,
> -		 * once software has completed all the queue's inflight events.
> -		 */
> -		if (!os_worker_active(hw))
> -			os_schedule_work(hw);
> -
> -		return 1;
> -	}
> -
> -	/* Disable the affected CQ, and the CQs already mapped to the QID,
> -	 * before reading the QID's inflight count a second time. There is an
> -	 * unlikely race in which the QID may schedule one more QE after we
> -	 * read an inflight count of 0, and disabling the CQs guarantees that
> -	 * the race will not occur after a re-read of the inflight count
> -	 * register.
> -	 */
> -	if (port->enabled)
> -		dlb_ldb_port_cq_disable(hw, port);
> -
> -	dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
> -
> -	if (r0.field.count) {
> -		if (port->enabled)
> -			dlb_ldb_port_cq_enable(hw, port);
> -
> -		dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
> -
> -		/* The queue is owed completions so it's not safe to map it
> -		 * yet. Schedule a kernel thread to complete the mapping later,
> -		 * once software has completed all the queue's inflight events.
> -		 */
> -		if (!os_worker_active(hw))
> -			os_schedule_work(hw);
> -
> -		return 1;
> -	}
> -
> -	return dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
> -}
> -
> -
> -static int dlb_ldb_port_map_qid(struct dlb_hw *hw,
> -				struct dlb_domain *domain,
> -				struct dlb_ldb_port *port,
> -				struct dlb_ldb_queue *queue,
> -				u8 prio)
> -{
> -	if (domain->started)
> -		return dlb_ldb_port_map_qid_dynamic(hw, port, queue, prio);
> -	else
> -		return dlb_ldb_port_map_qid_static(hw, port, queue, prio);
> -}
> -
> -static int dlb_ldb_port_unmap_qid(struct dlb_hw *hw,
> -				  struct dlb_ldb_port *port,
> -				  struct dlb_ldb_queue *queue)
> -{
> -	enum dlb_qid_map_state mapped, in_progress, pending_map, unmapped;
> -	union dlb_lsp_cq2priov r0;
> -	union dlb_atm_pipe_qid_ldb_qid2cqidx r1;
> -	union dlb_lsp_qid_ldb_qid2cqidx r2;
> -	union dlb_lsp_qid_ldb_qid2cqidx2 r3;
> -	u32 queue_id;
> -	u32 port_id;
> -	int i;
> -
> -	/* Find the queue's slot */
> -	mapped = DLB_QUEUE_MAPPED;
> -	in_progress = DLB_QUEUE_UNMAP_IN_PROGRESS;
> -	pending_map = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;
> -
> -	if (!dlb_port_find_slot_queue(port, mapped, queue, &i) &&
> -	    !dlb_port_find_slot_queue(port, in_progress, queue, &i) &&
> -	    !dlb_port_find_slot_queue(port, pending_map, queue, &i)) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: QID %d isn't mapped\n",
> -			   __func__, __LINE__, queue->id);
> -		return -EFAULT;
> -	}
> -
> -	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: port slot tracking failed\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	port_id = port->id;
> -	queue_id = queue->id;
> -
> -	/* Read-modify-write the priority and valid bit register */
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port_id));
> -
> -	r0.field.v &= ~(1 << i);
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port_id), r0.val);
> -
> -	r1.val = DLB_CSR_RD(hw,
> -			    DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id,
> -							   port_id / 4));
> -
> -	r2.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_LDB_QID2CQIDX(queue_id,
> -						      port_id / 4));
> -
> -	r3.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_LDB_QID2CQIDX2(queue_id,
> -						       port_id / 4));
> -
> -	switch (port_id % 4) {
> -	case 0:
> -		r1.field.cq_p0 &= ~(1 << i);
> -		r2.field.cq_p0 &= ~(1 << i);
> -		r3.field.cq_p0 &= ~(1 << i);
> -		break;
> -
> -	case 1:
> -		r1.field.cq_p1 &= ~(1 << i);
> -		r2.field.cq_p1 &= ~(1 << i);
> -		r3.field.cq_p1 &= ~(1 << i);
> -		break;
> -
> -	case 2:
> -		r1.field.cq_p2 &= ~(1 << i);
> -		r2.field.cq_p2 &= ~(1 << i);
> -		r3.field.cq_p2 &= ~(1 << i);
> -		break;
> -
> -	case 3:
> -		r1.field.cq_p3 &= ~(1 << i);
> -		r2.field.cq_p3 &= ~(1 << i);
> -		r3.field.cq_p3 &= ~(1 << i);
> -		break;
> -	}
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id, port_id / 4),
> -		   r1.val);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_QID_LDB_QID2CQIDX(queue_id, port_id / 4),
> -		   r2.val);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_QID_LDB_QID2CQIDX2(queue_id, port_id / 4),
> -		   r3.val);
> -
> -	dlb_flush_csr(hw);
> -
> -	unmapped = DLB_QUEUE_UNMAPPED;
> -
> -	return dlb_port_slot_state_transition(hw, port, queue, i, unmapped);
> -}
> -
> -static int
> -dlb_verify_create_sched_domain_args(struct dlb_hw *hw,
> -				    struct dlb_function_resources *rsrcs,
> -				    struct dlb_create_sched_domain_args *args,
> -				    struct dlb_cmd_response *resp)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_bitmap *ldb_credit_freelist;
> -	struct dlb_bitmap *dir_credit_freelist;
> -	unsigned int ldb_credit_freelist_count;
> -	unsigned int dir_credit_freelist_count;
> -	unsigned int max_contig_aqed_entries;
> -	unsigned int max_contig_dqed_entries;
> -	unsigned int max_contig_qed_entries;
> -	unsigned int max_contig_hl_entries;
> -	struct dlb_bitmap *aqed_freelist;
> -	enum dlb_dev_revision revision;
> -
> -	ldb_credit_freelist = rsrcs->avail_qed_freelist_entries;
> -	dir_credit_freelist = rsrcs->avail_dqed_freelist_entries;
> -	aqed_freelist = rsrcs->avail_aqed_freelist_entries;
> -
> -	ldb_credit_freelist_count = dlb_bitmap_count(ldb_credit_freelist);
> -	dir_credit_freelist_count = dlb_bitmap_count(dir_credit_freelist);
> -
> -	max_contig_hl_entries =
> -		dlb_bitmap_longest_set_range(rsrcs->avail_hist_list_entries);
> -	max_contig_aqed_entries =
> -		dlb_bitmap_longest_set_range(aqed_freelist);
> -	max_contig_qed_entries =
> -		dlb_bitmap_longest_set_range(ldb_credit_freelist);
> -	max_contig_dqed_entries =
> -		dlb_bitmap_longest_set_range(dir_credit_freelist);
> -
> -	if (rsrcs->num_avail_domains < 1)
> -		resp->status = DLB_ST_DOMAIN_UNAVAILABLE;
> -	else if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues)
> -		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
> -	else if (rsrcs->num_avail_ldb_ports < args->num_ldb_ports)
> -		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
> -	else if (args->num_ldb_queues > 0 && args->num_ldb_ports == 0)
> -		resp->status = DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
> -	else if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports)
> -		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
> -	else if (ldb_credit_freelist_count < args->num_ldb_credits)
> -		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
> -	else if (dir_credit_freelist_count < args->num_dir_credits)
> -		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
> -	else if (rsrcs->num_avail_ldb_credit_pools < args->num_ldb_credit_pools)
> -		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
> -	else if (rsrcs->num_avail_dir_credit_pools < args->num_dir_credit_pools)
> -		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
> -	else if (max_contig_hl_entries < args->num_hist_list_entries)
> -		resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
> -	else if (max_contig_aqed_entries < args->num_atomic_inflights)
> -		resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
> -	else if (max_contig_qed_entries < args->num_ldb_credits)
> -		resp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;
> -	else if (max_contig_dqed_entries < args->num_dir_credits)
> -		resp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;
> -
> -	/* DLB A-stepping workaround for hardware write buffer lock up issue:
> -	 * limit the maximum configured ports to less than 128 and disable CQ
> -	 * occupancy interrupts.
> -	 */
> -	revision = os_get_dev_revision(hw);
> -
> -	if (revision < DLB_B0) {
> -		u32 n = dlb_get_num_ports_in_use(hw);
> -
> -		n += args->num_ldb_ports + args->num_dir_ports;
> -
> -		if (n >= DLB_A_STEP_MAX_PORTS)
> -			resp->status = args->num_ldb_ports ?
> -				DLB_ST_LDB_PORTS_UNAVAILABLE :
> -				DLB_ST_DIR_PORTS_UNAVAILABLE;
> -	}
> -
> -	if (resp->status)
> -		return -1;
> -
> -	return 0;
> -}
> -
> -
> -static void
> -dlb_log_create_sched_domain_args(struct dlb_hw *hw,
> -				 struct dlb_create_sched_domain_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB create sched domain arguments:\n");
> -	DLB_HW_INFO(hw, "\tNumber of LDB queues:        %d\n",
> -		    args->num_ldb_queues);
> -	DLB_HW_INFO(hw, "\tNumber of LDB ports:         %d\n",
> -		    args->num_ldb_ports);
> -	DLB_HW_INFO(hw, "\tNumber of DIR ports:         %d\n",
> -		    args->num_dir_ports);
> -	DLB_HW_INFO(hw, "\tNumber of ATM inflights:     %d\n",
> -		    args->num_atomic_inflights);
> -	DLB_HW_INFO(hw, "\tNumber of hist list entries: %d\n",
> -		    args->num_hist_list_entries);
> -	DLB_HW_INFO(hw, "\tNumber of LDB credits:       %d\n",
> -		    args->num_ldb_credits);
> -	DLB_HW_INFO(hw, "\tNumber of DIR credits:       %d\n",
> -		    args->num_dir_credits);
> -	DLB_HW_INFO(hw, "\tNumber of LDB credit pools:  %d\n",
> -		    args->num_ldb_credit_pools);
> -	DLB_HW_INFO(hw, "\tNumber of DIR credit pools:  %d\n",
> -		    args->num_dir_credit_pools);
> -}
> -
> -/**
> - * dlb_hw_create_sched_domain() - Allocate and initialize a DLB scheduling
> - *	domain and its resources.
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
> - * satisfy a request, resp->status will be set accordingly.
> - */
> -int dlb_hw_create_sched_domain(struct dlb_hw *hw,
> -			       struct dlb_create_sched_domain_args *args,
> -			       struct dlb_cmd_response *resp)
> -{
> -	struct dlb_domain *domain;
> -	struct dlb_function_resources *rsrcs;
> -	int ret;
> -
> -	rsrcs = &hw->pf;
> -
> -	dlb_log_create_sched_domain_args(hw, args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	if (dlb_verify_create_sched_domain_args(hw, rsrcs, args, resp))
> -		return -EINVAL;
> -
> -	domain = DLB_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
> -
> -	/* Verification should catch this. */
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no available domains\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	if (domain->configured) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: avail_domains contains configured domains.\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	dlb_init_domain_rsrc_lists(domain);
> -
> -	/* Verification should catch this too. */
> -	ret = dlb_domain_attach_resources(hw, rsrcs, domain, args, resp);
> -	if (ret < 0) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: failed to verify args.\n",
> -			   __func__);
> -
> -		return -EFAULT;
> -	}
> -
> -	dlb_list_del(&rsrcs->avail_domains, &domain->func_list);
> -
> -	dlb_list_add(&rsrcs->used_domains, &domain->func_list);
> -
> -	resp->id = domain->id;
> -	resp->status = 0;
> -
> -	return 0;
> -}
> -
> -static void
> -dlb_configure_ldb_credit_pool(struct dlb_hw *hw,
> -			      struct dlb_domain *domain,
> -			      struct dlb_create_ldb_pool_args *args,
> -			      struct dlb_credit_pool *pool)
> -{
> -	union dlb_sys_ldb_pool_enbld r0 = { {0} };
> -	union dlb_chp_ldb_pool_crd_lim r1 = { {0} };
> -	union dlb_chp_ldb_pool_crd_cnt r2 = { {0} };
> -	union dlb_chp_qed_fl_base  r3 = { {0} };
> -	union dlb_chp_qed_fl_lim r4 = { {0} };
> -	union dlb_chp_qed_fl_push_ptr r5 = { {0} };
> -	union dlb_chp_qed_fl_pop_ptr  r6 = { {0} };
> -
> -	r1.field.limit = args->num_ldb_credits;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_LIM(pool->id), r1.val);
> -
> -	r2.field.count = args->num_ldb_credits;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_CNT(pool->id), r2.val);
> -
> -	r3.field.base = domain->qed_freelist.base + domain->qed_freelist.offset;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_QED_FL_BASE(pool->id), r3.val);
> -
> -	r4.field.freelist_disable = 0;
> -	r4.field.limit = r3.field.base + args->num_ldb_credits - 1;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_QED_FL_LIM(pool->id), r4.val);
> -
> -	r5.field.push_ptr = r3.field.base;
> -	r5.field.generation = 1;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_QED_FL_PUSH_PTR(pool->id), r5.val);
> -
> -	r6.field.pop_ptr = r3.field.base;
> -	r6.field.generation = 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_QED_FL_POP_PTR(pool->id), r6.val);
> -
> -	r0.field.pool_enabled = 1;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_POOL_ENBLD(pool->id), r0.val);
> -
> -	pool->avail_credits = args->num_ldb_credits;
> -	pool->total_credits = args->num_ldb_credits;
> -	domain->qed_freelist.offset += args->num_ldb_credits;
> -
> -	pool->configured = true;
> -}
> -
> -static int
> -dlb_verify_create_ldb_pool_args(struct dlb_hw *hw,
> -				u32 domain_id,
> -				struct dlb_create_ldb_pool_args *args,
> -				struct dlb_cmd_response *resp)
> -{
> -	struct dlb_freelist *qed_freelist;
> -	struct dlb_domain *domain;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	qed_freelist = &domain->qed_freelist;
> -
> -	if (dlb_freelist_count(qed_freelist) < args->num_ldb_credits) {
> -		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	if (dlb_list_empty(&domain->avail_ldb_credit_pools)) {
> -		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	if (domain->started) {
> -		resp->status = DLB_ST_DOMAIN_STARTED;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static void
> -dlb_log_create_ldb_pool_args(struct dlb_hw *hw,
> -			     u32 domain_id,
> -			     struct dlb_create_ldb_pool_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB create load-balanced credit pool arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID:             %d\n", domain_id);
> -	DLB_HW_INFO(hw, "\tNumber of LDB credits: %d\n",
> -		    args->num_ldb_credits);
> -}
> -
> -/**
> - * dlb_hw_create_ldb_pool() - Allocate and initialize a DLB credit pool.
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
> - * satisfy a request, resp->status will be set accordingly.
> - */
> -int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_create_ldb_pool_args *args,
> -			   struct dlb_cmd_response *resp)
> -{
> -	struct dlb_credit_pool *pool;
> -	struct dlb_domain *domain;
> -
> -	dlb_log_create_ldb_pool_args(hw, domain_id, args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	if (dlb_verify_create_ldb_pool_args(hw, domain_id, args, resp))
> -		return -EINVAL;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	pool = DLB_DOM_LIST_HEAD(domain->avail_ldb_credit_pools, typeof(*pool));
> -
> -	/* Verification should catch this. */
> -	if (pool == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no available ldb credit pools\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	dlb_configure_ldb_credit_pool(hw, domain, args, pool);
> -
> -	/* Configuration succeeded, so move the resource from the 'avail' to
> -	 * the 'used' list.
> -	 */
> -	dlb_list_del(&domain->avail_ldb_credit_pools, &pool->domain_list);
> -
> -	dlb_list_add(&domain->used_ldb_credit_pools, &pool->domain_list);
> -
> -	resp->status = 0;
> -	resp->id = pool->id;
> -
> -	return 0;
> -}
> -
> -static void
> -dlb_configure_dir_credit_pool(struct dlb_hw *hw,
> -			      struct dlb_domain *domain,
> -			      struct dlb_create_dir_pool_args *args,
> -			      struct dlb_credit_pool *pool)
> -{
> -	union dlb_sys_dir_pool_enbld r0 = { {0} };
> -	union dlb_chp_dir_pool_crd_lim r1 = { {0} };
> -	union dlb_chp_dir_pool_crd_cnt r2 = { {0} };
> -	union dlb_chp_dqed_fl_base  r3 = { {0} };
> -	union dlb_chp_dqed_fl_lim r4 = { {0} };
> -	union dlb_chp_dqed_fl_push_ptr r5 = { {0} };
> -	union dlb_chp_dqed_fl_pop_ptr  r6 = { {0} };
> -
> -	r1.field.limit = args->num_dir_credits;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_LIM(pool->id), r1.val);
> -
> -	r2.field.count = args->num_dir_credits;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_CNT(pool->id), r2.val);
> -
> -	r3.field.base = domain->dqed_freelist.base +
> -			domain->dqed_freelist.offset;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_BASE(pool->id), r3.val);
> -
> -	r4.field.freelist_disable = 0;
> -	r4.field.limit = r3.field.base + args->num_dir_credits - 1;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_LIM(pool->id), r4.val);
> -
> -	r5.field.push_ptr = r3.field.base;
> -	r5.field.generation = 1;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_PUSH_PTR(pool->id), r5.val);
> -
> -	r6.field.pop_ptr = r3.field.base;
> -	r6.field.generation = 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_POP_PTR(pool->id), r6.val);
> -
> -	r0.field.pool_enabled = 1;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_POOL_ENBLD(pool->id), r0.val);
> -
> -	pool->avail_credits = args->num_dir_credits;
> -	pool->total_credits = args->num_dir_credits;
> -	domain->dqed_freelist.offset += args->num_dir_credits;
> -
> -	pool->configured = true;
> -}
> -
> -static int
> -dlb_verify_create_dir_pool_args(struct dlb_hw *hw,
> -				u32 domain_id,
> -				struct dlb_create_dir_pool_args *args,
> -				struct dlb_cmd_response *resp)
> -{
> -	struct dlb_freelist *dqed_freelist;
> -	struct dlb_domain *domain;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	dqed_freelist = &domain->dqed_freelist;
> -
> -	if (dlb_freelist_count(dqed_freelist) < args->num_dir_credits) {
> -		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	if (dlb_list_empty(&domain->avail_dir_credit_pools)) {
> -		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	if (domain->started) {
> -		resp->status = DLB_ST_DOMAIN_STARTED;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static void
> -dlb_log_create_dir_pool_args(struct dlb_hw *hw,
> -			     u32 domain_id,
> -			     struct dlb_create_dir_pool_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB create directed credit pool arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID:             %d\n", domain_id);
> -	DLB_HW_INFO(hw, "\tNumber of DIR credits: %d\n",
> -		    args->num_dir_credits);
> -}
> -
> -/**
> - * dlb_hw_create_dir_pool() - Allocate and initialize a DLB credit pool.
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
> - * satisfy a request, resp->status will be set accordingly.
> - */
> -int dlb_hw_create_dir_pool(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_create_dir_pool_args *args,
> -			   struct dlb_cmd_response *resp)
> -{
> -	struct dlb_credit_pool *pool;
> -	struct dlb_domain *domain;
> -
> -	dlb_log_create_dir_pool_args(hw, domain_id, args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	/* At least one available pool */
> -	if (dlb_verify_create_dir_pool_args(hw, domain_id, args, resp))
> -		return -EINVAL;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	pool = DLB_DOM_LIST_HEAD(domain->avail_dir_credit_pools, typeof(*pool));
> -
> -	/* Verification should catch this. */
> -	if (pool == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no available dir credit pools\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	dlb_configure_dir_credit_pool(hw, domain, args, pool);
> -
> -	/* Configuration succeeded, so move the resource from the 'avail' to
> -	 * the 'used' list.
> -	 */
> -	dlb_list_del(&domain->avail_dir_credit_pools, &pool->domain_list);
> -
> -	dlb_list_add(&domain->used_dir_credit_pools, &pool->domain_list);
> -
> -	resp->status = 0;
> -	resp->id = pool->id;
> -
> -	return 0;
> -}
> -
> -static u32 dlb_ldb_cq_inflight_count(struct dlb_hw *hw,
> -				     struct dlb_ldb_port *port)
> -{
> -	union dlb_lsp_cq_ldb_infl_cnt r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));
> -
> -	return r0.field.count;
> -}
> -
> -static u32 dlb_ldb_cq_token_count(struct dlb_hw *hw,
> -				  struct dlb_ldb_port *port)
> -{
> -	union dlb_lsp_cq_ldb_tkn_cnt r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_TKN_CNT(port->id));
> -
> -	return r0.field.token_count;
> -}
> -
> -static int dlb_drain_ldb_cq(struct dlb_hw *hw, struct dlb_ldb_port *port)
> -{
> -	u32 infl_cnt, tkn_cnt;
> -	unsigned int i;
> -
> -	infl_cnt = dlb_ldb_cq_inflight_count(hw, port);
> -
> -	/* Account for the initial token count, which is used in order to
> -	 * provide a CQ with depth less than 8.
> -	 */
> -	tkn_cnt = dlb_ldb_cq_token_count(hw, port) - port->init_tkn_cnt;
> -
> -	if (infl_cnt || tkn_cnt) {
> -		struct dlb_hcw hcw_mem[8], *hcw;
> -		void  *pp_addr;
> -
> -		pp_addr = os_map_producer_port(hw, port->id, true);
> -
> -		/* Point hcw to a 64B-aligned location */
> -		hcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
> -
> -		/* Program the first HCW for a completion and token return and
> -		 * the other HCWs as NOOPS
> -		 */
> -
> -		memset(hcw, 0, 4 * sizeof(*hcw));
> -		hcw->qe_comp = (infl_cnt > 0);
> -		hcw->cq_token = (tkn_cnt > 0);
> -		hcw->lock_id = tkn_cnt - 1;
> -
> -		/* Return tokens in the first HCW */
> -		dlb_movdir64b(pp_addr, hcw);
> -
> -		hcw->cq_token = 0;
> -
> -		/* Issue remaining completions (if any) */
> -		for (i = 1; i < infl_cnt; i++)
> -			dlb_movdir64b(pp_addr, hcw);
> -
> -		os_fence_hcw(hw, pp_addr);
> -
> -		os_unmap_producer_port(hw, pp_addr);
> -	}
> -
> -	return 0;
> -}
> -
> -static int dlb_domain_drain_ldb_cqs(struct dlb_hw *hw,
> -				    struct dlb_domain *domain,
> -				    bool toggle_port)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -	int ret;
> -
> -	/* If the domain hasn't been started, there's no traffic to drain */
> -	if (!domain->started)
> -		return 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		if (toggle_port)
> -			dlb_ldb_port_cq_disable(hw, port);
> -
> -		ret = dlb_drain_ldb_cq(hw, port);
> -		if (ret < 0)
> -			return ret;
> -
> -		if (toggle_port)
> -			dlb_ldb_port_cq_enable(hw, port);
> -	}
> -
> -	return 0;
> -}
> -
> -static void dlb_domain_disable_ldb_queue_write_perms(struct dlb_hw *hw,
> -						     struct dlb_domain *domain)
> -{
> -	int domain_offset = domain->id * DLB_MAX_NUM_LDB_QUEUES;
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_sys_ldb_vasqid_v r0;
> -	struct dlb_ldb_queue *queue;
> -
> -	r0.field.vasqid_v = 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
> -		int idx = domain_offset + queue->id;
> -
> -		DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(idx), r0.val);
> -	}
> -}
> -
> -static void dlb_domain_disable_ldb_seq_checks(struct dlb_hw *hw,
> -					      struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_chp_sn_chk_enbl r1;
> -	struct dlb_ldb_port *port;
> -
> -	r1.field.en = 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_SN_CHK_ENBL(port->id),
> -			   r1.val);
> -}
> -
> -static void dlb_domain_disable_ldb_port_crd_updates(struct dlb_hw *hw,
> -						    struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_chp_ldb_pp_crd_req_state r0;
> -	struct dlb_ldb_port *port;
> -
> -	r0.field.no_pp_credit_update = 1;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),
> -			   r0.val);
> -}
> -
> -static void dlb_domain_disable_ldb_port_interrupts(struct dlb_hw *hw,
> -						   struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_chp_ldb_cq_int_enb r0 = { {0} };
> -	union dlb_chp_ldb_cq_wd_enb r1 = { {0} };
> -	struct dlb_ldb_port *port;
> -
> -	r0.field.en_tim = 0;
> -	r0.field.en_depth = 0;
> -
> -	r1.field.wd_enable = 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_LDB_CQ_INT_ENB(port->id),
> -			   r0.val);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_LDB_CQ_WD_ENB(port->id),
> -			   r1.val);
> -	}
> -}
> -
> -static void dlb_domain_disable_dir_queue_write_perms(struct dlb_hw *hw,
> -						     struct dlb_domain *domain)
> -{
> -	int domain_offset = domain->id * DLB_MAX_NUM_DIR_PORTS;
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_sys_dir_vasqid_v r0;
> -	struct dlb_dir_pq_pair *port;
> -
> -	r0.field.vasqid_v = 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
> -		int idx = domain_offset + port->id;
> -
> -		DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(idx), r0.val);
> -	}
> -}
> -
> -static void dlb_domain_disable_dir_port_interrupts(struct dlb_hw *hw,
> -						   struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_chp_dir_cq_int_enb r0 = { {0} };
> -	union dlb_chp_dir_cq_wd_enb r1 = { {0} };
> -	struct dlb_dir_pq_pair *port;
> -
> -	r0.field.en_tim = 0;
> -	r0.field.en_depth = 0;
> -
> -	r1.field.wd_enable = 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DIR_CQ_INT_ENB(port->id),
> -			   r0.val);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DIR_CQ_WD_ENB(port->id),
> -			   r1.val);
> -	}
> -}
> -
> -static void dlb_domain_disable_dir_port_crd_updates(struct dlb_hw *hw,
> -						    struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_chp_dir_pp_crd_req_state r0;
> -	struct dlb_dir_pq_pair *port;
> -
> -	r0.field.no_pp_credit_update = 1;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
> -			   r0.val);
> -}
> -
> -static void dlb_domain_disable_dir_cqs(struct dlb_hw *hw,
> -				       struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_dir_pq_pair *port;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
> -		port->enabled = false;
> -
> -		dlb_dir_port_cq_disable(hw, port);
> -	}
> -}
> -
> -static void dlb_domain_disable_ldb_cqs(struct dlb_hw *hw,
> -				       struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		port->enabled = false;
> -
> -		dlb_ldb_port_cq_disable(hw, port);
> -	}
> -}
> -
> -static void dlb_domain_enable_ldb_cqs(struct dlb_hw *hw,
> -				      struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		port->enabled = true;
> -
> -		dlb_ldb_port_cq_enable(hw, port);
> -	}
> -}
> -
> -static struct dlb_ldb_queue *dlb_get_ldb_queue_from_id(struct dlb_hw *hw,
> -						       u32 id)
> -{
> -	if (id >= DLB_MAX_NUM_LDB_QUEUES)
> -		return NULL;
> -
> -	return &hw->rsrcs.ldb_queues[id];
> -}
> -
> -static void dlb_ldb_port_clear_has_work_bits(struct dlb_hw *hw,
> -					     struct dlb_ldb_port *port,
> -					     u8 slot)
> -{
> -	union dlb_lsp_ldb_sched_ctrl r2 = { {0} };
> -
> -	r2.field.cq = port->id;
> -	r2.field.qidix = slot;
> -	r2.field.value = 0;
> -	r2.field.rlist_haswork_v = 1;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
> -
> -	memset(&r2, 0, sizeof(r2));
> -
> -	r2.field.cq = port->id;
> -	r2.field.qidix = slot;
> -	r2.field.value = 0;
> -	r2.field.nalb_haswork_v = 1;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
> -
> -	dlb_flush_csr(hw);
> -}
> -
> -static void dlb_domain_finish_map_port(struct dlb_hw *hw,
> -				       struct dlb_domain *domain,
> -				       struct dlb_ldb_port *port)
> -{
> -	int i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
> -		union dlb_lsp_qid_ldb_infl_cnt r0;
> -		struct dlb_ldb_queue *queue;
> -		int qid;
> -
> -		if (port->qid_map[i].state != DLB_QUEUE_MAP_IN_PROGRESS)
> -			continue;
> -
> -		qid = port->qid_map[i].qid;
> -
> -		queue = dlb_get_ldb_queue_from_id(hw, qid);
> -
> -		if (queue == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: unable to find queue %d\n",
> -				   __func__, qid);
> -			continue;
> -		}
> -
> -		r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));
> -
> -		if (r0.field.count)
> -			continue;
> -
> -		/* Disable the affected CQ, and the CQs already mapped to the
> -		 * QID, before reading the QID's inflight count a second time.
> -		 * There is an unlikely race in which the QID may schedule one
> -		 * more QE after we read an inflight count of 0, and disabling
> -		 * the CQs guarantees that the race will not occur after a
> -		 * re-read of the inflight count register.
> -		 */
> -		if (port->enabled)
> -			dlb_ldb_port_cq_disable(hw, port);
> -
> -		dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
> -
> -		r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));
> -
> -		if (r0.field.count) {
> -			if (port->enabled)
> -				dlb_ldb_port_cq_enable(hw, port);
> -
> -			dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
> -
> -			continue;
> -		}
> -
> -		dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
> -	}
> -}
> -
> -static unsigned int
> -dlb_domain_finish_map_qid_procedures(struct dlb_hw *hw,
> -				     struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -
> -	if (!domain->configured || domain->num_pending_additions == 0)
> -		return 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
> -		dlb_domain_finish_map_port(hw, domain, port);
> -
> -	return domain->num_pending_additions;
> -}
> -
> -unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw)
> -{
> -	int i, num = 0;
> -
> -	/* Finish queue map jobs for any domain that needs it */
> -	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
> -		struct dlb_domain *domain = &hw->domains[i];
> -
> -		num += dlb_domain_finish_map_qid_procedures(hw, domain);
> -	}
> -
> -	return num;
> -}
> -
> -
> -static int dlb_domain_wait_for_ldb_cqs_to_empty(struct dlb_hw *hw,
> -						struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		int i;
> -
> -		for (i = 0; i < DLB_MAX_CQ_COMP_CHECK_LOOPS; i++) {
> -			if (dlb_ldb_cq_inflight_count(hw, port) == 0)
> -				break;
> -		}
> -
> -		if (i == DLB_MAX_CQ_COMP_CHECK_LOOPS) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
> -				   __func__, port->id);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -
> -static void dlb_domain_finish_unmap_port_slot(struct dlb_hw *hw,
> -					      struct dlb_domain *domain,
> -					      struct dlb_ldb_port *port,
> -					      int slot)
> -{
> -	enum dlb_qid_map_state state;
> -	struct dlb_ldb_queue *queue;
> -
> -	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
> -
> -	state = port->qid_map[slot].state;
> -
> -	/* Update the QID2CQIDX and CQ2QID vectors */
> -	dlb_ldb_port_unmap_qid(hw, port, queue);
> -
> -	/* Ensure the QID will not be serviced by this {CQ, slot} by clearing
> -	 * the has_work bits
> -	 */
> -	dlb_ldb_port_clear_has_work_bits(hw, port, slot);
> -
> -	/* Reset the {CQ, slot} to its default state */
> -	dlb_ldb_port_set_queue_if_status(hw, port, slot);
> -
> -	/* Re-enable the CQ if it was not manually disabled by the user */
> -	if (port->enabled)
> -		dlb_ldb_port_cq_enable(hw, port);
> -
> -	/* If there is a mapping that is pending this slot's removal, perform
> -	 * the mapping now.
> -	 */
> -	if (state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP) {
> -		struct dlb_ldb_port_qid_map *map;
> -		struct dlb_ldb_queue *map_queue;
> -		u8 prio;
> -
> -		map = &port->qid_map[slot];
> -
> -		map->qid = map->pending_qid;
> -		map->priority = map->pending_priority;
> -
> -		map_queue = &hw->rsrcs.ldb_queues[map->qid];
> -		prio = map->priority;
> -
> -		dlb_ldb_port_map_qid(hw, domain, port, map_queue, prio);
> -	}
> -}
> -
> -static bool dlb_domain_finish_unmap_port(struct dlb_hw *hw,
> -					 struct dlb_domain *domain,
> -					 struct dlb_ldb_port *port)
> -{
> -	union dlb_lsp_cq_ldb_infl_cnt r0;
> -	int i;
> -
> -	if (port->num_pending_removals == 0)
> -		return false;
> -
> -	/* The unmap requires all the CQ's outstanding inflights to be
> -	 * completed.
> -	 */
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));
> -	if (r0.field.count > 0)
> -		return false;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
> -		struct dlb_ldb_port_qid_map *map;
> -
> -		map = &port->qid_map[i];
> -
> -		if (map->state != DLB_QUEUE_UNMAP_IN_PROGRESS &&
> -		    map->state != DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP)
> -			continue;
> -
> -		dlb_domain_finish_unmap_port_slot(hw, domain, port, i);
> -	}
> -
> -	return true;
> -}
> -
> -static unsigned int
> -dlb_domain_finish_unmap_qid_procedures(struct dlb_hw *hw,
> -				       struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -
> -	if (!domain->configured || domain->num_pending_removals == 0)
> -		return 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
> -		dlb_domain_finish_unmap_port(hw, domain, port);
> -
> -	return domain->num_pending_removals;
> -}
> -
> -unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw)
> -{
> -	int i, num = 0;
> -
> -	/* Finish queue unmap jobs for any domain that needs it */
> -	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
> -		struct dlb_domain *domain = &hw->domains[i];
> -
> -		num += dlb_domain_finish_unmap_qid_procedures(hw, domain);
> -	}
> -
> -	return num;
> -}
> -
> -/* Returns whether the queue is empty, including its inflight and replay
> - * counts.
> - */
> -static bool dlb_ldb_queue_is_empty(struct dlb_hw *hw,
> -				   struct dlb_ldb_queue *queue)
> -{
> -	union dlb_lsp_qid_ldb_replay_cnt r0;
> -	union dlb_lsp_qid_aqed_active_cnt r1;
> -	union dlb_lsp_qid_atq_enqueue_cnt r2;
> -	union dlb_lsp_qid_ldb_enqueue_cnt r3;
> -	union dlb_lsp_qid_ldb_infl_cnt r4;
> -
> -	r0.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_LDB_REPLAY_CNT(queue->id));
> -	if (r0.val)
> -		return false;
> -
> -	r1.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
> -	if (r1.val)
> -		return false;
> -
> -	r2.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));
> -	if (r2.val)
> -		return false;
> -
> -	r3.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
> -	if (r3.val)
> -		return false;
> -
> -	r4.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_LDB_INFL_CNT(queue->id));
> -	if (r4.val)
> -		return false;
> -
> -	return true;
> -}
> -
> -static bool dlb_domain_mapped_queues_empty(struct dlb_hw *hw,
> -					   struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_queue *queue;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
> -		if (queue->num_mappings == 0)
> -			continue;
> -
> -		if (!dlb_ldb_queue_is_empty(hw, queue))
> -			return false;
> -	}
> -
> -	return true;
> -}
> -
> -static int dlb_domain_drain_mapped_queues(struct dlb_hw *hw,
> -					  struct dlb_domain *domain)
> -{
> -	int i, ret;
> -
> -	/* If the domain hasn't been started, there's no traffic to drain */
> -	if (!domain->started)
> -		return 0;
> -
> -	if (domain->num_pending_removals > 0) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: failed to unmap domain queues\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
> -		ret = dlb_domain_drain_ldb_cqs(hw, domain, true);
> -		if (ret < 0)
> -			return ret;
> -
> -		if (dlb_domain_mapped_queues_empty(hw, domain))
> -			break;
> -	}
> -
> -	if (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: failed to empty queues\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	/* Drain the CQs one more time. For the queues to go empty, they would
> -	 * have scheduled one or more QEs.
> -	 */
> -	ret = dlb_domain_drain_ldb_cqs(hw, domain, true);
> -	if (ret < 0)
> -		return ret;
> -
> -	return 0;
> -}
> -
> -static int dlb_domain_drain_unmapped_queue(struct dlb_hw *hw,
> -					   struct dlb_domain *domain,
> -					   struct dlb_ldb_queue *queue)
> -{
> -	struct dlb_ldb_port *port;
> -	int ret;
> -
> -	/* If a domain has LDB queues, it must have LDB ports */
> -	if (dlb_list_empty(&domain->used_ldb_ports)) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: No configured LDB ports\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	port = DLB_DOM_LIST_HEAD(domain->used_ldb_ports, typeof(*port));
> -
> -	/* If necessary, free up a QID slot in this CQ */
> -	if (port->num_mappings == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -		struct dlb_ldb_queue *mapped_queue;
> -
> -		mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
> -
> -		ret = dlb_ldb_port_unmap_qid(hw, port, mapped_queue);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	ret = dlb_ldb_port_map_qid_dynamic(hw, port, queue, 0);
> -	if (ret)
> -		return ret;
> -
> -	return dlb_domain_drain_mapped_queues(hw, domain);
> -}
> -
> -static int dlb_domain_drain_unmapped_queues(struct dlb_hw *hw,
> -					    struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_queue *queue;
> -	int ret;
> -
> -	/* If the domain hasn't been started, there's no traffic to drain */
> -	if (!domain->started)
> -		return 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
> -		if (queue->num_mappings != 0 ||
> -		    dlb_ldb_queue_is_empty(hw, queue))
> -			continue;
> -
> -		ret = dlb_domain_drain_unmapped_queue(hw, domain, queue);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	return 0;
> -}
> -
> -static int dlb_domain_wait_for_ldb_pool_refill(struct dlb_hw *hw,
> -					       struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_credit_pool *pool;
> -
> -	/* Confirm that all credits are returned to the domain's credit pools */
> -	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
> -		union dlb_chp_qed_fl_push_ptr r0;
> -		union dlb_chp_qed_fl_pop_ptr r1;
> -		unsigned long pop_offs, push_offs;
> -		int i;
> -
> -		push_offs = DLB_CHP_QED_FL_PUSH_PTR(pool->id);
> -		pop_offs = DLB_CHP_QED_FL_POP_PTR(pool->id);
> -
> -		for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
> -			r0.val = DLB_CSR_RD(hw, push_offs);
> -
> -			r1.val = DLB_CSR_RD(hw, pop_offs);
> -
> -			/* Break early if the freelist is replenished */
> -			if (r1.field.pop_ptr == r0.field.push_ptr &&
> -			    r1.field.generation != r0.field.generation) {
> -				break;
> -			}
> -		}
> -
> -		/* Error if the freelist is not full */
> -		if (r1.field.pop_ptr != r0.field.push_ptr ||
> -		    r1.field.generation == r0.field.generation) {
> -			return -EFAULT;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -static int dlb_domain_wait_for_dir_pool_refill(struct dlb_hw *hw,
> -					       struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_credit_pool *pool;
> -
> -	/* Confirm that all credits are returned to the domain's credit pools */
> -	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
> -		union dlb_chp_dqed_fl_push_ptr r0;
> -		union dlb_chp_dqed_fl_pop_ptr r1;
> -		unsigned long pop_offs, push_offs;
> -		int i;
> -
> -		push_offs = DLB_CHP_DQED_FL_PUSH_PTR(pool->id);
> -		pop_offs = DLB_CHP_DQED_FL_POP_PTR(pool->id);
> -
> -		for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
> -			r0.val = DLB_CSR_RD(hw, push_offs);
> -
> -			r1.val = DLB_CSR_RD(hw, pop_offs);
> -
> -			/* Break early if the freelist is replenished */
> -			if (r1.field.pop_ptr == r0.field.push_ptr &&
> -			    r1.field.generation != r0.field.generation) {
> -				break;
> -			}
> -		}
> -
> -		/* Error if the freelist is not full */
> -		if (r1.field.pop_ptr != r0.field.push_ptr ||
> -		    r1.field.generation == r0.field.generation) {
> -			return -EFAULT;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -static u32 dlb_dir_queue_depth(struct dlb_hw *hw,
> -			       struct dlb_dir_pq_pair *queue)
> -{
> -	union dlb_lsp_qid_dir_enqueue_cnt r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_DIR_ENQUEUE_CNT(queue->id));
> -
> -	return r0.field.count;
> -}
> -
> -static bool dlb_dir_queue_is_empty(struct dlb_hw *hw,
> -				   struct dlb_dir_pq_pair *queue)
> -{
> -	return dlb_dir_queue_depth(hw, queue) == 0;
> -}
> -
> -static bool dlb_domain_dir_queues_empty(struct dlb_hw *hw,
> -					struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_dir_pq_pair *queue;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
> -		if (!dlb_dir_queue_is_empty(hw, queue))
> -			return false;
> -	}
> -
> -	return true;
> -}
> -
> -static u32 dlb_dir_cq_token_count(struct dlb_hw *hw,
> -				  struct dlb_dir_pq_pair *port)
> -{
> -	union dlb_lsp_cq_dir_tkn_cnt r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_DIR_TKN_CNT(port->id));
> -
> -	return r0.field.count;
> -}
> -
> -static void dlb_drain_dir_cq(struct dlb_hw *hw, struct dlb_dir_pq_pair *port)
> -{
> -	unsigned int port_id = port->id;
> -	u32 cnt;
> -
> -	/* Return any outstanding tokens */
> -	cnt = dlb_dir_cq_token_count(hw, port);
> -
> -	if (cnt != 0) {
> -		struct dlb_hcw hcw_mem[8], *hcw;
> -		void  *pp_addr;
> -
> -		pp_addr = os_map_producer_port(hw, port_id, false);
> -
> -		/* Point hcw to a 64B-aligned location */
> -		hcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
> -
> -		/* Program the first HCW for a batch token return and
> -		 * the rest as NOOPS
> -		 */
> -		memset(hcw, 0, 4 * sizeof(*hcw));
> -		hcw->cq_token = 1;
> -		hcw->lock_id = cnt - 1;
> -
> -		dlb_movdir64b(pp_addr, hcw);
> -
> -		os_fence_hcw(hw, pp_addr);
> -
> -		os_unmap_producer_port(hw, pp_addr);
> -	}
> -}
> -
> -static int dlb_domain_drain_dir_cqs(struct dlb_hw *hw,
> -				    struct dlb_domain *domain,
> -				    bool toggle_port)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_dir_pq_pair *port;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
> -		/* Can't drain a port if it's not configured, and there's
> -		 * nothing to drain if its queue is unconfigured.
> -		 */
> -		if (!port->port_configured || !port->queue_configured)
> -			continue;
> -
> -		if (toggle_port)
> -			dlb_dir_port_cq_disable(hw, port);
> -
> -		dlb_drain_dir_cq(hw, port);
> -
> -		if (toggle_port)
> -			dlb_dir_port_cq_enable(hw, port);
> -	}
> -
> -	return 0;
> -}
> -
> -static int dlb_domain_drain_dir_queues(struct dlb_hw *hw,
> -				       struct dlb_domain *domain)
> -{
> -	int i;
> -
> -	/* If the domain hasn't been started, there's no traffic to drain */
> -	if (!domain->started)
> -		return 0;
> -
> -	for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
> -		dlb_domain_drain_dir_cqs(hw, domain, true);
> -
> -		if (dlb_domain_dir_queues_empty(hw, domain))
> -			break;
> -	}
> -
> -	if (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: failed to empty queues\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	/* Drain the CQs one more time. For the queues to go empty, they would
> -	 * have scheduled one or more QEs.
> -	 */
> -	dlb_domain_drain_dir_cqs(hw, domain, true);
> -
> -	return 0;
> -}
> -
> -static void dlb_domain_disable_dir_producer_ports(struct dlb_hw *hw,
> -						  struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_dir_pq_pair *port;
> -	union dlb_sys_dir_pp_v r1;
> -
> -	r1.field.pp_v = 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
> -		DLB_CSR_WR(hw,
> -			   DLB_SYS_DIR_PP_V(port->id),
> -			   r1.val);
> -}
> -
> -static void dlb_domain_disable_ldb_producer_ports(struct dlb_hw *hw,
> -						  struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_sys_ldb_pp_v r1;
> -	struct dlb_ldb_port *port;
> -
> -	r1.field.pp_v = 0;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
> -		DLB_CSR_WR(hw,
> -			   DLB_SYS_LDB_PP_V(port->id),
> -			   r1.val);
> -
> -		hw->pf.num_enabled_ldb_ports--;
> -	}
> -}
> -
> -static void dlb_domain_disable_dir_pools(struct dlb_hw *hw,
> -					 struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_sys_dir_pool_enbld r0 = { {0} };
> -	struct dlb_credit_pool *pool;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
> -		DLB_CSR_WR(hw,
> -			   DLB_SYS_DIR_POOL_ENBLD(pool->id),
> -			   r0.val);
> -}
> -
> -static void dlb_domain_disable_ldb_pools(struct dlb_hw *hw,
> -					 struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	union dlb_sys_ldb_pool_enbld r0 = { {0} };
> -	struct dlb_credit_pool *pool;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
> -		DLB_CSR_WR(hw,
> -			   DLB_SYS_LDB_POOL_ENBLD(pool->id),
> -			   r0.val);
> -}
> -
> -static int dlb_reset_hw_resource(struct dlb_hw *hw, int type, int id)
> -{
> -	union dlb_cfg_mstr_diag_reset_sts r0 = { {0} };
> -	union dlb_cfg_mstr_bcast_reset_vf_start r1 = { {0} };
> -	int i;
> -
> -	r1.field.vf_reset_start = 1;
> -
> -	r1.field.vf_reset_type = type;
> -	r1.field.vf_reset_id = id;
> -
> -	DLB_CSR_WR(hw, DLB_CFG_MSTR_BCAST_RESET_VF_START, r1.val);
> -
> -	/* Wait for hardware to complete. This is a finite time operation,
> -	 * but set a loop bound just in case.
> -	 */
> -	for (i = 0; i < 1024 * 1024; i++) {
> -		r0.val = DLB_CSR_RD(hw, DLB_CFG_MSTR_DIAG_RESET_STS);
> -
> -		if (r0.field.chp_vf_reset_done &&
> -		    r0.field.rop_vf_reset_done &&
> -		    r0.field.lsp_vf_reset_done &&
> -		    r0.field.nalb_vf_reset_done &&
> -		    r0.field.ap_vf_reset_done &&
> -		    r0.field.dp_vf_reset_done &&
> -		    r0.field.qed_vf_reset_done &&
> -		    r0.field.dqed_vf_reset_done &&
> -		    r0.field.aqed_vf_reset_done)
> -			return 0;
> -
> -		os_udelay(1);
> -	}
> -
> -	return -ETIMEDOUT;
> -}
> -
> -static int dlb_domain_reset_hw_resources(struct dlb_hw *hw,
> -					 struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_dir_pq_pair *dir_port;
> -	struct dlb_ldb_queue *ldb_queue;
> -	struct dlb_ldb_port *ldb_port;
> -	struct dlb_credit_pool *pool;
> -	int ret;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
> -		ret = dlb_reset_hw_resource(hw,
> -					    VF_RST_TYPE_POOL_LDB,
> -					    pool->id);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
> -		ret = dlb_reset_hw_resource(hw,
> -					    VF_RST_TYPE_POOL_DIR,
> -					    pool->id);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
> -		ret = dlb_reset_hw_resource(hw,
> -					    VF_RST_TYPE_QID_LDB,
> -					    ldb_queue->id);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
> -		ret = dlb_reset_hw_resource(hw,
> -					    VF_RST_TYPE_QID_DIR,
> -					    dir_port->id);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {
> -		ret = dlb_reset_hw_resource(hw,
> -					    VF_RST_TYPE_CQ_LDB,
> -					    ldb_port->id);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
> -		ret = dlb_reset_hw_resource(hw,
> -					    VF_RST_TYPE_CQ_DIR,
> -					    dir_port->id);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	return 0;
> -}
> -
> -static int dlb_domain_verify_reset_success(struct dlb_hw *hw,
> -					   struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_dir_pq_pair *dir_port;
> -	struct dlb_ldb_port *ldb_port;
> -	struct dlb_credit_pool *pool;
> -	struct dlb_ldb_queue *queue;
> -
> -	/* Confirm that all credits are returned to the domain's credit pools */
> -	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
> -		union dlb_chp_dqed_fl_pop_ptr r0;
> -		union dlb_chp_dqed_fl_push_ptr r1;
> -
> -		r0.val = DLB_CSR_RD(hw,
> -				    DLB_CHP_DQED_FL_POP_PTR(pool->id));
> -
> -		r1.val = DLB_CSR_RD(hw,
> -				    DLB_CHP_DQED_FL_PUSH_PTR(pool->id));
> -
> -		if (r0.field.pop_ptr != r1.field.push_ptr ||
> -		    r0.field.generation == r1.field.generation) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: failed to refill directed pool %d's credits.\n",
> -				   __func__, pool->id);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	/* Confirm that all the domain's queue's inflight counts and AQED
> -	 * active counts are 0.
> -	 */
> -	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
> -		if (!dlb_ldb_queue_is_empty(hw, queue)) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: failed to empty ldb queue %d\n",
> -				   __func__, queue->id);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	/* Confirm that all the domain's CQs inflight and token counts are 0. */
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {
> -		if (dlb_ldb_cq_inflight_count(hw, ldb_port) ||
> -		    dlb_ldb_cq_token_count(hw, ldb_port)) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: failed to empty ldb port %d\n",
> -				   __func__, ldb_port->id);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
> -		if (!dlb_dir_queue_is_empty(hw, dir_port)) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: failed to empty dir queue %d\n",
> -				   __func__, dir_port->id);
> -			return -EFAULT;
> -		}
> -
> -		if (dlb_dir_cq_token_count(hw, dir_port)) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: failed to empty dir port %d\n",
> -				   __func__, dir_port->id);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -static void __dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,
> -						  struct dlb_ldb_port *port)
> -{
> -	union dlb_chp_ldb_pp_state_reset r0 = { {0} };
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),
> -		   DLB_CHP_LDB_PP_CRD_REQ_STATE_RST);
> -
> -	/* Reset the port's load-balanced and directed credit state */
> -	r0.field.dir_type = 0;
> -	r0.field.reset_pp_state = 1;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_STATE_RESET(port->id),
> -		   r0.val);
> -
> -	r0.field.dir_type = 1;
> -	r0.field.reset_pp_state = 1;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_STATE_RESET(port->id),
> -		   r0.val);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id),
> -		   DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id),
> -		   DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
> -		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id),
> -		   DLB_CHP_LDB_PP_LDB_CRD_LWM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id),
> -		   DLB_CHP_LDB_PP_LDB_CRD_HWM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_LDB_PP2POOL(port->id),
> -		   DLB_CHP_LDB_LDB_PP2POOL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
> -		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id),
> -		   DLB_CHP_LDB_PP_DIR_CRD_LWM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id),
> -		   DLB_CHP_LDB_PP_DIR_CRD_HWM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_DIR_PP2POOL(port->id),
> -		   DLB_CHP_LDB_DIR_PP2POOL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_PP2LDBPOOL(port->id),
> -		   DLB_SYS_LDB_PP2LDBPOOL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_PP2DIRPOOL(port->id),
> -		   DLB_SYS_LDB_PP2DIRPOOL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_HIST_LIST_LIM(port->id),
> -		   DLB_CHP_HIST_LIST_LIM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_HIST_LIST_BASE(port->id),
> -		   DLB_CHP_HIST_LIST_BASE_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_HIST_LIST_POP_PTR(port->id),
> -		   DLB_CHP_HIST_LIST_POP_PTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_HIST_LIST_PUSH_PTR(port->id),
> -		   DLB_CHP_HIST_LIST_PUSH_PTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_CQ_WPTR(port->id),
> -		   DLB_CHP_LDB_CQ_WPTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id),
> -		   DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_CQ_TMR_THRESHOLD(port->id),
> -		   DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_CQ_INT_ENB(port->id),
> -		   DLB_CHP_LDB_CQ_INT_ENB_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ_LDB_INFL_LIM(port->id),
> -		   DLB_LSP_CQ_LDB_INFL_LIM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ2PRIOV(port->id),
> -		   DLB_LSP_CQ2PRIOV_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(port->id),
> -		   DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
> -		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
> -		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ_LDB_DSBL(port->id),
> -		   DLB_LSP_CQ_LDB_DSBL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_CQ2VF_PF(port->id),
> -		   DLB_SYS_LDB_CQ2VF_PF_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_PP2VF_PF(port->id),
> -		   DLB_SYS_LDB_PP2VF_PF_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_CQ_ADDR_L(port->id),
> -		   DLB_SYS_LDB_CQ_ADDR_L_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_CQ_ADDR_U(port->id),
> -		   DLB_SYS_LDB_CQ_ADDR_U_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_PP_ADDR_L(port->id),
> -		   DLB_SYS_LDB_PP_ADDR_L_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_PP_ADDR_U(port->id),
> -		   DLB_SYS_LDB_PP_ADDR_U_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_PP_V(port->id),
> -		   DLB_SYS_LDB_PP_V_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_PP2VAS(port->id),
> -		   DLB_SYS_LDB_PP2VAS_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_CQ_ISR(port->id),
> -		   DLB_SYS_LDB_CQ_ISR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_WBUF_LDB_FLAGS(port->id),
> -		   DLB_SYS_WBUF_LDB_FLAGS_RST);
> -}
> -
> -static void __dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,
> -						  struct dlb_dir_pq_pair *port)
> -{
> -	union dlb_chp_dir_pp_state_reset r0 = { {0} };
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
> -		   DLB_CHP_DIR_PP_CRD_REQ_STATE_RST);
> -
> -	/* Reset the port's load-balanced and directed credit state */
> -	r0.field.dir_type = 0;
> -	r0.field.reset_pp_state = 1;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_STATE_RESET(port->id),
> -		   r0.val);
> -
> -	r0.field.dir_type = 1;
> -	r0.field.reset_pp_state = 1;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_STATE_RESET(port->id),
> -		   r0.val);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
> -		   DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
> -		   DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
> -		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
> -		   DLB_CHP_DIR_PP_LDB_CRD_LWM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
> -		   DLB_CHP_DIR_PP_LDB_CRD_HWM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_LDB_PP2POOL(port->id),
> -		   DLB_CHP_DIR_LDB_PP2POOL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
> -		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
> -		   DLB_CHP_DIR_PP_DIR_CRD_LWM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
> -		   DLB_CHP_DIR_PP_DIR_CRD_HWM_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_DIR_PP2POOL(port->id),
> -		   DLB_CHP_DIR_DIR_PP2POOL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP2LDBPOOL(port->id),
> -		   DLB_SYS_DIR_PP2LDBPOOL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP2DIRPOOL(port->id),
> -		   DLB_SYS_DIR_PP2DIRPOOL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_CQ_WPTR(port->id),
> -		   DLB_CHP_DIR_CQ_WPTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
> -		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
> -		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ_DIR_DSBL(port->id),
> -		   DLB_LSP_CQ_DIR_DSBL_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_CQ_WPTR(port->id),
> -		   DLB_CHP_DIR_CQ_WPTR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id),
> -		   DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_CQ_TMR_THRESHOLD(port->id),
> -		   DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_CQ_INT_ENB(port->id),
> -		   DLB_CHP_DIR_CQ_INT_ENB_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_CQ2VF_PF(port->id),
> -		   DLB_SYS_DIR_CQ2VF_PF_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP2VF_PF(port->id),
> -		   DLB_SYS_DIR_PP2VF_PF_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_CQ_ADDR_L(port->id),
> -		   DLB_SYS_DIR_CQ_ADDR_L_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_CQ_ADDR_U(port->id),
> -		   DLB_SYS_DIR_CQ_ADDR_U_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP_ADDR_L(port->id),
> -		   DLB_SYS_DIR_PP_ADDR_L_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP_ADDR_U(port->id),
> -		   DLB_SYS_DIR_PP_ADDR_U_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP_V(port->id),
> -		   DLB_SYS_DIR_PP_V_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP2VAS(port->id),
> -		   DLB_SYS_DIR_PP2VAS_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_CQ_ISR(port->id),
> -		   DLB_SYS_DIR_CQ_ISR_RST);
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_WBUF_DIR_FLAGS(port->id),
> -		   DLB_SYS_WBUF_DIR_FLAGS_RST);
> -}
> -
> -static void dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,
> -						struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_dir_pq_pair *port;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
> -		__dlb_domain_reset_dir_port_registers(hw, port);
> -}
> -
> -static void dlb_domain_reset_ldb_queue_registers(struct dlb_hw *hw,
> -						 struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_queue *queue;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
> -		DLB_CSR_WR(hw,
> -			   DLB_AQED_PIPE_FL_LIM(queue->id),
> -			   DLB_AQED_PIPE_FL_LIM_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_AQED_PIPE_FL_BASE(queue->id),
> -			   DLB_AQED_PIPE_FL_BASE_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_AQED_PIPE_FL_POP_PTR(queue->id),
> -			   DLB_AQED_PIPE_FL_POP_PTR_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_AQED_PIPE_FL_PUSH_PTR(queue->id),
> -			   DLB_AQED_PIPE_FL_PUSH_PTR_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_AQED_PIPE_QID_FID_LIM(queue->id),
> -			   DLB_AQED_PIPE_QID_FID_LIM_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id),
> -			   DLB_LSP_QID_AQED_ACTIVE_LIM_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_LSP_QID_LDB_INFL_LIM(queue->id),
> -			   DLB_LSP_QID_LDB_INFL_LIM_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_SYS_LDB_QID_V(queue->id),
> -			   DLB_SYS_LDB_QID_V_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_SYS_LDB_QID_V(queue->id),
> -			   DLB_SYS_LDB_QID_V_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_ORD_QID_SN(queue->id),
> -			   DLB_CHP_ORD_QID_SN_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_ORD_QID_SN_MAP(queue->id),
> -			   DLB_CHP_ORD_QID_SN_MAP_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_RO_PIPE_QID2GRPSLT(queue->id),
> -			   DLB_RO_PIPE_QID2GRPSLT_RST);
> -	}
> -}
> -
> -static void dlb_domain_reset_dir_queue_registers(struct dlb_hw *hw,
> -						 struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_dir_pq_pair *queue;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
> -		DLB_CSR_WR(hw,
> -			   DLB_SYS_DIR_QID_V(queue->id),
> -			   DLB_SYS_DIR_QID_V_RST);
> -	}
> -}
> -
> -static void dlb_domain_reset_ldb_pool_registers(struct dlb_hw *hw,
> -						struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_credit_pool *pool;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_LDB_POOL_CRD_LIM(pool->id),
> -			   DLB_CHP_LDB_POOL_CRD_LIM_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
> -			   DLB_CHP_LDB_POOL_CRD_CNT_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_QED_FL_BASE(pool->id),
> -			   DLB_CHP_QED_FL_BASE_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_QED_FL_LIM(pool->id),
> -			   DLB_CHP_QED_FL_LIM_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_QED_FL_PUSH_PTR(pool->id),
> -			   DLB_CHP_QED_FL_PUSH_PTR_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_QED_FL_POP_PTR(pool->id),
> -			   DLB_CHP_QED_FL_POP_PTR_RST);
> -	}
> -}
> -
> -static void dlb_domain_reset_dir_pool_registers(struct dlb_hw *hw,
> -						struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_credit_pool *pool;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DIR_POOL_CRD_LIM(pool->id),
> -			   DLB_CHP_DIR_POOL_CRD_LIM_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
> -			   DLB_CHP_DIR_POOL_CRD_CNT_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DQED_FL_BASE(pool->id),
> -			   DLB_CHP_DQED_FL_BASE_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DQED_FL_LIM(pool->id),
> -			   DLB_CHP_DQED_FL_LIM_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DQED_FL_PUSH_PTR(pool->id),
> -			   DLB_CHP_DQED_FL_PUSH_PTR_RST);
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_CHP_DQED_FL_POP_PTR(pool->id),
> -			   DLB_CHP_DQED_FL_POP_PTR_RST);
> -	}
> -}
> -
> -static void dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,
> -						struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	RTE_SET_USED(iter);
> -	struct dlb_ldb_port *port;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
> -		__dlb_domain_reset_ldb_port_registers(hw, port);
> -}
> -
> -static void dlb_domain_reset_registers(struct dlb_hw *hw,
> -				       struct dlb_domain *domain)
> -{
> -	dlb_domain_reset_ldb_port_registers(hw, domain);
> -
> -	dlb_domain_reset_dir_port_registers(hw, domain);
> -
> -	dlb_domain_reset_ldb_queue_registers(hw, domain);
> -
> -	dlb_domain_reset_dir_queue_registers(hw, domain);
> -
> -	dlb_domain_reset_ldb_pool_registers(hw, domain);
> -
> -	dlb_domain_reset_dir_pool_registers(hw, domain);
> -}
> -
> -static int dlb_domain_reset_software_state(struct dlb_hw *hw,
> -					   struct dlb_domain *domain)
> -{
> -	struct dlb_ldb_queue *tmp_ldb_queue;
> -	RTE_SET_USED(tmp_ldb_queue);
> -	struct dlb_dir_pq_pair *tmp_dir_port;
> -	RTE_SET_USED(tmp_dir_port);
> -	struct dlb_ldb_port *tmp_ldb_port;
> -	RTE_SET_USED(tmp_ldb_port);
> -	struct dlb_credit_pool *tmp_pool;
> -	RTE_SET_USED(tmp_pool);
> -	struct dlb_list_entry *iter1;
> -	RTE_SET_USED(iter1);
> -	struct dlb_list_entry *iter2;
> -	RTE_SET_USED(iter2);
> -	struct dlb_ldb_queue *ldb_queue;
> -	struct dlb_dir_pq_pair *dir_port;
> -	struct dlb_ldb_port *ldb_port;
> -	struct dlb_credit_pool *pool;
> -
> -	struct dlb_function_resources *rsrcs;
> -	struct dlb_list_head *list;
> -	int ret;
> -
> -	rsrcs = domain->parent_func;
> -
> -	/* Move the domain's ldb queues to the function's avail list */
> -	list = &domain->used_ldb_queues;
> -	DLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
> -		if (ldb_queue->sn_cfg_valid) {
> -			struct dlb_sn_group *grp;
> -
> -			grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
> -
> -			dlb_sn_group_free_slot(grp, ldb_queue->sn_slot);
> -			ldb_queue->sn_cfg_valid = false;
> -		}
> -
> -		ldb_queue->owned = false;
> -		ldb_queue->num_mappings = 0;
> -		ldb_queue->num_pending_additions = 0;
> -
> -		dlb_list_del(&domain->used_ldb_queues, &ldb_queue->domain_list);
> -		dlb_list_add(&rsrcs->avail_ldb_queues, &ldb_queue->func_list);
> -		rsrcs->num_avail_ldb_queues++;
> -	}
> -
> -	list = &domain->avail_ldb_queues;
> -	DLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
> -		ldb_queue->owned = false;
> -
> -		dlb_list_del(&domain->avail_ldb_queues,
> -			     &ldb_queue->domain_list);
> -		dlb_list_add(&rsrcs->avail_ldb_queues,
> -			     &ldb_queue->func_list);
> -		rsrcs->num_avail_ldb_queues++;
> -	}
> -
> -	/* Move the domain's ldb ports to the function's avail list */
> -	list = &domain->used_ldb_ports;
> -	DLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {
> -		int i;
> -
> -		ldb_port->owned = false;
> -		ldb_port->configured = false;
> -		ldb_port->num_pending_removals = 0;
> -		ldb_port->num_mappings = 0;
> -		for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
> -			ldb_port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
> -
> -		dlb_list_del(&domain->used_ldb_ports, &ldb_port->domain_list);
> -		dlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);
> -		rsrcs->num_avail_ldb_ports++;
> -	}
> -
> -	list = &domain->avail_ldb_ports;
> -	DLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {
> -		ldb_port->owned = false;
> -
> -		dlb_list_del(&domain->avail_ldb_ports, &ldb_port->domain_list);
> -		dlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);
> -		rsrcs->num_avail_ldb_ports++;
> -	}
> -
> -	/* Move the domain's dir ports to the function's avail list */
> -	list = &domain->used_dir_pq_pairs;
> -	DLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
> -		dir_port->owned = false;
> -		dir_port->port_configured = false;
> -
> -		dlb_list_del(&domain->used_dir_pq_pairs,
> -			     &dir_port->domain_list);
> -
> -		dlb_list_add(&rsrcs->avail_dir_pq_pairs,
> -			     &dir_port->func_list);
> -		rsrcs->num_avail_dir_pq_pairs++;
> -	}
> -
> -	list = &domain->avail_dir_pq_pairs;
> -	DLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
> -		dir_port->owned = false;
> -
> -		dlb_list_del(&domain->avail_dir_pq_pairs,
> -			     &dir_port->domain_list);
> -
> -		dlb_list_add(&rsrcs->avail_dir_pq_pairs,
> -			     &dir_port->func_list);
> -		rsrcs->num_avail_dir_pq_pairs++;
> -	}
> -
> -	/* Return hist list entries to the function */
> -	ret = dlb_bitmap_set_range(rsrcs->avail_hist_list_entries,
> -				   domain->hist_list_entry_base,
> -				   domain->total_hist_list_entries);
> -	if (ret) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	domain->total_hist_list_entries = 0;
> -	domain->avail_hist_list_entries = 0;
> -	domain->hist_list_entry_base = 0;
> -	domain->hist_list_entry_offset = 0;
> -
> -	/* Return QED entries to the function */
> -	ret = dlb_bitmap_set_range(rsrcs->avail_qed_freelist_entries,
> -				   domain->qed_freelist.base,
> -				   (domain->qed_freelist.bound -
> -					domain->qed_freelist.base));
> -	if (ret) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: domain QED base does not match the function's bitmap.\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	domain->qed_freelist.base = 0;
> -	domain->qed_freelist.bound = 0;
> -	domain->qed_freelist.offset = 0;
> -
> -	/* Return DQED entries back to the function */
> -	ret = dlb_bitmap_set_range(rsrcs->avail_dqed_freelist_entries,
> -				   domain->dqed_freelist.base,
> -				   (domain->dqed_freelist.bound -
> -					domain->dqed_freelist.base));
> -	if (ret) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: domain DQED base does not match the function's bitmap.\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	domain->dqed_freelist.base = 0;
> -	domain->dqed_freelist.bound = 0;
> -	domain->dqed_freelist.offset = 0;
> -
> -	/* Return AQED entries back to the function */
> -	ret = dlb_bitmap_set_range(rsrcs->avail_aqed_freelist_entries,
> -				   domain->aqed_freelist.base,
> -				   (domain->aqed_freelist.bound -
> -					domain->aqed_freelist.base));
> -	if (ret) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: domain AQED base does not match the function's bitmap.\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	domain->aqed_freelist.base = 0;
> -	domain->aqed_freelist.bound = 0;
> -	domain->aqed_freelist.offset = 0;
> -
> -	/* Return ldb credit pools back to the function's avail list */
> -	list = &domain->used_ldb_credit_pools;
> -	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
> -		pool->owned = false;
> -		pool->configured = false;
> -
> -		dlb_list_del(&domain->used_ldb_credit_pools,
> -			     &pool->domain_list);
> -		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
> -			     &pool->func_list);
> -		rsrcs->num_avail_ldb_credit_pools++;
> -	}
> -
> -	list = &domain->avail_ldb_credit_pools;
> -	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
> -		pool->owned = false;
> -
> -		dlb_list_del(&domain->avail_ldb_credit_pools,
> -			     &pool->domain_list);
> -		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
> -			     &pool->func_list);
> -		rsrcs->num_avail_ldb_credit_pools++;
> -	}
> -
> -	/* Move dir credit pools back to the function */
> -	list = &domain->used_dir_credit_pools;
> -	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
> -		pool->owned = false;
> -		pool->configured = false;
> -
> -		dlb_list_del(&domain->used_dir_credit_pools,
> -			     &pool->domain_list);
> -		dlb_list_add(&rsrcs->avail_dir_credit_pools,
> -			     &pool->func_list);
> -		rsrcs->num_avail_dir_credit_pools++;
> -	}
> -
> -	list = &domain->avail_dir_credit_pools;
> -	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
> -		pool->owned = false;
> -
> -		dlb_list_del(&domain->avail_dir_credit_pools,
> -			     &pool->domain_list);
> -		dlb_list_add(&rsrcs->avail_dir_credit_pools,
> -			     &pool->func_list);
> -		rsrcs->num_avail_dir_credit_pools++;
> -	}
> -
> -	domain->num_pending_removals = 0;
> -	domain->num_pending_additions = 0;
> -	domain->configured = false;
> -	domain->started = false;
> -
> -	/* Move the domain out of the used_domains list and back to the
> -	 * function's avail_domains list.
> -	 */
> -	dlb_list_del(&rsrcs->used_domains, &domain->func_list);
> -	dlb_list_add(&rsrcs->avail_domains, &domain->func_list);
> -	rsrcs->num_avail_domains++;
> -
> -	return 0;
> -}
> -
> -static void dlb_log_reset_domain(struct dlb_hw *hw, u32 domain_id)
> -{
> -	DLB_HW_INFO(hw, "DLB reset domain:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
> -}
> -
> -/**
> - * dlb_reset_domain() - Reset a DLB scheduling domain and its associated
> - *	hardware resources.
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Note: User software *must* stop sending to this domain's producer ports
> - * before invoking this function, otherwise undefined behavior will
> result.
> - *
> - * Return: returns < 0 on error, 0 otherwise.
> - */
> -int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id)
> -{
> -	struct dlb_domain *domain;
> -	int ret;
> -
> -	dlb_log_reset_domain(hw, domain_id);
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain  == NULL || !domain->configured)
> -		return -EINVAL;
> -
> -	/* For each queue owned by this domain, disable its write permissions to
> -	 * cause any traffic sent to it to be dropped. Well-behaved software
> -	 * should not be sending QEs at this point.
> -	 */
> -	dlb_domain_disable_dir_queue_write_perms(hw, domain);
> -
> -	dlb_domain_disable_ldb_queue_write_perms(hw, domain);
> -
> -	/* Disable credit updates and turn off completion tracking on all the
> -	 * domain's PPs.
> -	 */
> -	dlb_domain_disable_dir_port_crd_updates(hw, domain);
> -
> -	dlb_domain_disable_ldb_port_crd_updates(hw, domain);
> -
> -	dlb_domain_disable_dir_port_interrupts(hw, domain);
> -
> -	dlb_domain_disable_ldb_port_interrupts(hw, domain);
> -
> -	dlb_domain_disable_ldb_seq_checks(hw, domain);
> -
> -	/* Disable the LDB CQs and drain them in order to complete the map and
> -	 * unmap procedures, which require zero CQ inflights and zero QID
> -	 * inflights respectively.
> -	 */
> -	dlb_domain_disable_ldb_cqs(hw, domain);
> -
> -	ret = dlb_domain_drain_ldb_cqs(hw, domain, false);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_domain_wait_for_ldb_cqs_to_empty(hw, domain);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_domain_finish_unmap_qid_procedures(hw, domain);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_domain_finish_map_qid_procedures(hw, domain);
> -	if (ret < 0)
> -		return ret;
> -
> -	/* Re-enable the CQs in order to drain the mapped queues. */
> -	dlb_domain_enable_ldb_cqs(hw, domain);
> -
> -	ret = dlb_domain_drain_mapped_queues(hw, domain);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_domain_drain_unmapped_queues(hw, domain);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_domain_wait_for_ldb_pool_refill(hw, domain);
> -	if (ret) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: LDB credits failed to refill\n",
> -			   __func__);
> -		return ret;
> -	}
> -
> -	/* Done draining LDB QEs, so disable the CQs. */
> -	dlb_domain_disable_ldb_cqs(hw, domain);
> -
> -	/* Directed queues are reset in dlb_domain_reset_hw_resources(), but
> -	 * that process does not decrement the directed queue size counters used
> -	 * by SMON for its average DQED depth measurement. So, we manually drain
> -	 * the directed queues here.
> -	 */
> -	dlb_domain_drain_dir_queues(hw, domain);
> -
> -	ret = dlb_domain_wait_for_dir_pool_refill(hw, domain);
> -	if (ret) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: DIR credits failed to refill\n",
> -			   __func__);
> -		return ret;
> -	}
> -
> -	/* Done draining DIR QEs, so disable the CQs. */
> -	dlb_domain_disable_dir_cqs(hw, domain);
> -
> -	dlb_domain_disable_dir_producer_ports(hw, domain);
> -
> -	dlb_domain_disable_ldb_producer_ports(hw, domain);
> -
> -	dlb_domain_disable_dir_pools(hw, domain);
> -
> -	dlb_domain_disable_ldb_pools(hw, domain);
> -
> -	/* Reset the QID, credit pool, and CQ hardware.
> -	 *
> -	 * Note: DLB 1.0 A0 h/w does not disarm CQ interrupts during sched
> -	 * domain reset.
> -	 * A spurious interrupt can occur on subsequent use of a reset CQ.
> -	 */
> -	ret = dlb_domain_reset_hw_resources(hw, domain);
> -	if (ret)
> -		return ret;
> -
> -	ret = dlb_domain_verify_reset_success(hw, domain);
> -	if (ret)
> -		return ret;
> -
> -	dlb_domain_reset_registers(hw, domain);
> -
> -	/* Hardware reset complete. Reset the domain's software state */
> -	ret = dlb_domain_reset_software_state(hw, domain);
> -	if (ret)
> -		return ret;
> -
> -	return 0;
> -}
> -
> -void dlb_hw_get_num_resources(struct dlb_hw *hw,
> -			      struct dlb_get_num_resources_args *arg)
> -{
> -	struct dlb_function_resources *rsrcs;
> -	struct dlb_bitmap *map;
> -
> -	rsrcs = &hw->pf;
> -
> -	arg->num_sched_domains = rsrcs->num_avail_domains;
> -
> -	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
> -
> -	arg->num_ldb_ports = rsrcs->num_avail_ldb_ports;
> -
> -	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
> -
> -	map = rsrcs->avail_aqed_freelist_entries;
> -
> -	arg->num_atomic_inflights = dlb_bitmap_count(map);
> -
> -	arg->max_contiguous_atomic_inflights =
> -		dlb_bitmap_longest_set_range(map);
> -
> -	map = rsrcs->avail_hist_list_entries;
> -
> -	arg->num_hist_list_entries = dlb_bitmap_count(map);
> -
> -	arg->max_contiguous_hist_list_entries =
> -		dlb_bitmap_longest_set_range(map);
> -
> -	map = rsrcs->avail_qed_freelist_entries;
> -
> -	arg->num_ldb_credits = dlb_bitmap_count(map);
> -
> -	arg->max_contiguous_ldb_credits = dlb_bitmap_longest_set_range(map);
> -
> -	map = rsrcs->avail_dqed_freelist_entries;
> -
> -	arg->num_dir_credits = dlb_bitmap_count(map);
> -
> -	arg->max_contiguous_dir_credits = dlb_bitmap_longest_set_range(map);
> -
> -	arg->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools;
> -
> -	arg->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools;
> -}
> -
> -void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw)
> -{
> -	union dlb_sys_sys_alarm_int_enable r0;
> -
> -	r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
> -
> -	r0.field.vf_to_pf_isr_pend_error = 0;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
> -}
> -
> -static void dlb_configure_ldb_queue(struct dlb_hw *hw,
> -				    struct dlb_domain *domain,
> -				    struct dlb_ldb_queue *queue,
> -				    struct dlb_create_ldb_queue_args *args)
> -{
> -	union dlb_sys_ldb_vasqid_v r0 = { {0} };
> -	union dlb_lsp_qid_ldb_infl_lim r1 = { {0} };
> -	union dlb_lsp_qid_aqed_active_lim r2 = { {0} };
> -	union dlb_aqed_pipe_fl_lim r3 = { {0} };
> -	union dlb_aqed_pipe_fl_base r4 = { {0} };
> -	union dlb_chp_ord_qid_sn_map r7 = { {0} };
> -	union dlb_sys_ldb_qid_cfg_v r10 = { {0} };
> -	union dlb_sys_ldb_qid_v r11 = { {0} };
> -	union dlb_aqed_pipe_fl_push_ptr r5 = { {0} };
> -	union dlb_aqed_pipe_fl_pop_ptr r6 = { {0} };
> -	union dlb_aqed_pipe_qid_fid_lim r8 = { {0} };
> -	union dlb_ro_pipe_qid2grpslt r9 = { {0} };
> -	struct dlb_sn_group *sn_group;
> -	unsigned int offs;
> -
> -	/* QID write permissions are turned on when the domain is started */
> -	r0.field.vasqid_v = 0;
> -
> -	offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + queue->id;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
> -
> -	/*
> -	 * Unordered QIDs get 4K inflights, ordered get as many as the number
> -	 * of sequence numbers.
> -	 */
> -	r1.field.limit = args->num_qid_inflights;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r1.val);
> -
> -	r2.field.limit = queue->aqed_freelist.bound -
> -			 queue->aqed_freelist.base;
> -
> -	if (r2.field.limit > DLB_MAX_NUM_AQOS_ENTRIES)
> -		r2.field.limit = DLB_MAX_NUM_AQOS_ENTRIES;
> -
> -	/* AQOS */
> -	DLB_CSR_WR(hw, DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id), r2.val);
> -
> -	r3.field.freelist_disable = 0;
> -	r3.field.limit = queue->aqed_freelist.bound - 1;
> -
> -	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_LIM(queue->id), r3.val);
> -
> -	r4.field.base = queue->aqed_freelist.base;
> -
> -	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_BASE(queue->id), r4.val);
> -
> -	r5.field.push_ptr = r4.field.base;
> -	r5.field.generation = 1;
> -
> -	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_PUSH_PTR(queue->id), r5.val);
> -
> -	r6.field.pop_ptr = r4.field.base;
> -	r6.field.generation = 0;
> -
> -	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_POP_PTR(queue->id), r6.val);
> -
> -	/* Configure SNs */
> -	sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
> -	r7.field.mode = sn_group->mode;
> -	r7.field.slot = queue->sn_slot;
> -	r7.field.grp  = sn_group->id;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_ORD_QID_SN_MAP(queue->id), r7.val);
> -
> -	/*
> -	 * This register limits the number of inflight flows a queue can have
> -	 * at one time.  It has an upper bound of 2048, but can be
> -	 * over-subscribed. 512 is chosen so that a single queue doesn't use
> -	 * the entire atomic storage, but can use a substantial portion if
> -	 * needed.
> -	 */
> -	r8.field.qid_fid_limit = 512;
> -
> -	DLB_CSR_WR(hw, DLB_AQED_PIPE_QID_FID_LIM(queue->id), r8.val);
> -
> -	r9.field.group = sn_group->id;
> -	r9.field.slot = queue->sn_slot;
> -
> -	DLB_CSR_WR(hw, DLB_RO_PIPE_QID2GRPSLT(queue->id), r9.val);
> -
> -	r10.field.sn_cfg_v = (args->num_sequence_numbers != 0);
> -	r10.field.fid_cfg_v = (args->num_atomic_inflights != 0);
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_QID_CFG_V(queue->id), r10.val);
> -
> -	r11.field.qid_v = 1;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_QID_V(queue->id), r11.val);
> -}
> -
> -int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id)
> -{
> -	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
> -		return -EINVAL;
> -
> -	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
> -}
> -
> -int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
> -					    unsigned int group_id)
> -{
> -	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
> -		return -EINVAL;
> -
> -	return dlb_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
> -}
> -
> -static void dlb_log_set_group_sequence_numbers(struct dlb_hw *hw,
> -					       unsigned int group_id,
> -					       unsigned long val)
> -{
> -	DLB_HW_INFO(hw, "DLB set group sequence numbers:\n");
> -	DLB_HW_INFO(hw, "\tGroup ID: %u\n", group_id);
> -	DLB_HW_INFO(hw, "\tValue:    %lu\n", val);
> -}
> -
> -int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
> -				   unsigned int group_id,
> -				   unsigned long val)
> -{
> -	u32 valid_allocations[6] = {32, 64, 128, 256, 512, 1024};
> -	union dlb_ro_pipe_grp_sn_mode r0 = { {0} };
> -	struct dlb_sn_group *group;
> -	int mode;
> -
> -	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
> -		return -EINVAL;
> -
> -	group = &hw->rsrcs.sn_groups[group_id];
> -
> -	/* Once the first load-balanced queue using an SN group is configured,
> -	 * the group cannot be changed.
> -	 */
> -	if (group->slot_use_bitmap != 0)
> -		return -EPERM;
> -
> -	for (mode = 0; mode < DLB_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
> -		if (val == valid_allocations[mode])
> -			break;
> -
> -	if (mode == DLB_MAX_NUM_SEQUENCE_NUMBER_MODES)
> -		return -EINVAL;
> -
> -	group->mode = mode;
> -	group->sequence_numbers_per_queue = val;
> -
> -	r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
> -	r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
> -	r0.field.sn_mode_2 = hw->rsrcs.sn_groups[2].mode;
> -	r0.field.sn_mode_3 = hw->rsrcs.sn_groups[3].mode;
> -
> -	DLB_CSR_WR(hw, DLB_RO_PIPE_GRP_SN_MODE, r0.val);
> -
> -	dlb_log_set_group_sequence_numbers(hw, group_id, val);
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_ldb_queue_attach_to_sn_group(struct dlb_hw *hw,
> -				 struct dlb_ldb_queue *queue,
> -				 struct dlb_create_ldb_queue_args *args)
> -{
> -	int slot = -1;
> -	int i;
> -
> -	queue->sn_cfg_valid = false;
> -
> -	if (args->num_sequence_numbers == 0)
> -		return 0;
> -
> -	for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
> -		struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
> -
> -		if (group->sequence_numbers_per_queue ==
> -		    args->num_sequence_numbers &&
> -		    !dlb_sn_group_full(group)) {
> -			slot = dlb_sn_group_alloc_slot(group);
> -			if (slot >= 0)
> -				break;
> -		}
> -	}
> -
> -	if (slot == -1) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no sequence number slots available\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	queue->sn_cfg_valid = true;
> -	queue->sn_group = i;
> -	queue->sn_slot = slot;
> -	return 0;
> -}
> -
> -static int
> -dlb_ldb_queue_attach_resources(struct dlb_hw *hw,
> -			       struct dlb_domain *domain,
> -			       struct dlb_ldb_queue *queue,
> -			       struct dlb_create_ldb_queue_args *args)
> -{
> -	int ret;
> -
> -	ret = dlb_ldb_queue_attach_to_sn_group(hw, queue, args);
> -	if (ret)
> -		return ret;
> -
> -	/* Attach QID inflights */
> -	queue->num_qid_inflights = args->num_qid_inflights;
> -
> -	/* Attach atomic inflights */
> -	queue->aqed_freelist.base = domain->aqed_freelist.base +
> -				    domain->aqed_freelist.offset;
> -	queue->aqed_freelist.bound = queue->aqed_freelist.base +
> -				     args->num_atomic_inflights;
> -	domain->aqed_freelist.offset += args->num_atomic_inflights;
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_verify_create_ldb_queue_args(struct dlb_hw *hw,
> -				 u32 domain_id,
> -				 struct dlb_create_ldb_queue_args *args,
> -				 struct dlb_cmd_response *resp)
> -{
> -	struct dlb_freelist *aqed_freelist;
> -	struct dlb_domain *domain;
> -	int i;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	if (domain->started) {
> -		resp->status = DLB_ST_DOMAIN_STARTED;
> -		return -1;
> -	}
> -
> -	if (dlb_list_empty(&domain->avail_ldb_queues)) {
> -		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	if (args->num_sequence_numbers) {
> -		for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
> -			struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
> -
> -			if (group->sequence_numbers_per_queue ==
> -			    args->num_sequence_numbers &&
> -			    !dlb_sn_group_full(group))
> -				break;
> -		}
> -
> -		if (i == DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
> -			resp->status = DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
> -			return -1;
> -		}
> -	}
> -
> -	if (args->num_qid_inflights > 4096) {
> -		resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
> -		return -1;
> -	}
> -
> -	/* Inflights must be <= number of sequence numbers if ordered */
> -	if (args->num_sequence_numbers != 0 &&
> -	    args->num_qid_inflights > args->num_sequence_numbers) {
> -		resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
> -		return -1;
> -	}
> -
> -	aqed_freelist = &domain->aqed_freelist;
> -
> -	if (dlb_freelist_count(aqed_freelist) < args->num_atomic_inflights) {
> -		resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static void
> -dlb_log_create_ldb_queue_args(struct dlb_hw *hw,
> -			      u32 domain_id,
> -			      struct dlb_create_ldb_queue_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB create load-balanced queue arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID:                  %d\n",
> -		    domain_id);
> -	DLB_HW_INFO(hw, "\tNumber of sequence numbers: %d\n",
> -		    args->num_sequence_numbers);
> -	DLB_HW_INFO(hw, "\tNumber of QID inflights:    %d\n",
> -		    args->num_qid_inflights);
> -	DLB_HW_INFO(hw, "\tNumber of ATM inflights:    %d\n",
> -		    args->num_atomic_inflights);
> -}
> -
> -/**
> - * dlb_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
> - * satisfy a request, resp->status will be set accordingly.
> - */
> -int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
> -			    u32 domain_id,
> -			    struct dlb_create_ldb_queue_args *args,
> -			    struct dlb_cmd_response *resp)
> -{
> -	struct dlb_ldb_queue *queue;
> -	struct dlb_domain *domain;
> -	int ret;
> -
> -	dlb_log_create_ldb_queue_args(hw, domain_id, args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	/* At least one available queue */
> -	if (dlb_verify_create_ldb_queue_args(hw, domain_id, args, resp))
> -		return -EINVAL;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	queue = DLB_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
> -
> -	/* Verification should catch this. */
> -	if (queue == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no available ldb queues\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	ret = dlb_ldb_queue_attach_resources(hw, domain, queue, args);
> -	if (ret < 0) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
> -			   __func__, __LINE__);
> -		return ret;
> -	}
> -
> -	dlb_configure_ldb_queue(hw, domain, queue, args);
> -
> -	queue->num_mappings = 0;
> -
> -	queue->configured = true;
> -
> -	/* Configuration succeeded, so move the resource from the 'avail' to
> -	 * the 'used' list.
> -	 */
> -	dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);
> -
> -	dlb_list_add(&domain->used_ldb_queues, &queue->domain_list);
> -
> -	resp->status = 0;
> -	resp->id = queue->id;
> -
> -	return 0;
> -}
> -
> -
> -static void
> -dlb_log_create_dir_queue_args(struct dlb_hw *hw,
> -			      u32 domain_id,
> -			      struct dlb_create_dir_queue_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB create directed queue arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
> -	DLB_HW_INFO(hw, "\tPort ID:   %d\n", args->port_id);
> -}
> -
> -static struct dlb_dir_pq_pair *
> -dlb_get_domain_used_dir_pq(u32 id, struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	struct dlb_dir_pq_pair *port;
> -	RTE_SET_USED(iter);
> -
> -	if (id >= DLB_MAX_NUM_DIR_PORTS)
> -		return NULL;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
> -		if (port->id == id)
> -			return port;
> -
> -	return NULL;
> -}
> -
> -static int
> -dlb_verify_create_dir_queue_args(struct dlb_hw *hw,
> -				 u32 domain_id,
> -				 struct dlb_create_dir_queue_args *args,
> -				 struct dlb_cmd_response *resp)
> -{
> -	struct dlb_domain *domain;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	if (domain->started) {
> -		resp->status = DLB_ST_DOMAIN_STARTED;
> -		return -1;
> -	}
> -
> -	/* If the user claims the port is already configured, validate the port
> -	 * ID, its domain, and whether the port is configured.
> -	 */
> -	if (args->port_id != -1) {
> -		struct dlb_dir_pq_pair *port;
> -
> -		port = dlb_get_domain_used_dir_pq(args->port_id, domain);
> -
> -		if (port  == NULL || port->domain_id != domain->id ||
> -		    !port->port_configured) {
> -			resp->status = DLB_ST_INVALID_PORT_ID;
> -			return -1;
> -		}
> -	}
> -
> -	/* If the queue's port is not configured, validate that a free
> -	 * port-queue pair is available.
> -	 */
> -	if (args->port_id == -1 &&
> -	    dlb_list_empty(&domain->avail_dir_pq_pairs)) {
> -		resp->status = DLB_ST_DIR_QUEUES_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static void dlb_configure_dir_queue(struct dlb_hw *hw,
> -				    struct dlb_domain *domain,
> -				    struct dlb_dir_pq_pair *queue)
> -{
> -	union dlb_sys_dir_vasqid_v r0 = { {0} };
> -	union dlb_sys_dir_qid_v r1 = { {0} };
> -	unsigned int offs;
> -
> -	/* QID write permissions are turned on when the domain is started */
> -	r0.field.vasqid_v = 0;
> -
> -	offs = (domain->id * DLB_MAX_NUM_DIR_PORTS) + queue->id;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
> -
> -	r1.field.qid_v = 1;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_QID_V(queue->id), r1.val);
> -
> -	queue->queue_configured = true;
> -}
> -
> -/**
> - * dlb_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
> - * satisfy a request, resp->status will be set accordingly.
> - */
> -int dlb_hw_create_dir_queue(struct dlb_hw *hw,
> -			    u32 domain_id,
> -			    struct dlb_create_dir_queue_args *args,
> -			    struct dlb_cmd_response *resp)
> -{
> -	struct dlb_dir_pq_pair *queue;
> -	struct dlb_domain *domain;
> -
> -	dlb_log_create_dir_queue_args(hw, domain_id, args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	if (dlb_verify_create_dir_queue_args(hw, domain_id, args, resp))
> -		return -EINVAL;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	if (args->port_id != -1)
> -		queue = dlb_get_domain_used_dir_pq(args->port_id, domain);
> -	else
> -		queue = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
> -					  typeof(*queue));
> -
> -	/* Verification should catch this. */
> -	if (queue == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no available dir queues\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	dlb_configure_dir_queue(hw, domain, queue);
> -
> -	/* Configuration succeeded, so move the resource from the 'avail' to
> -	 * the 'used' list (if it's not already there).
> -	 */
> -	if (args->port_id == -1) {
> -		dlb_list_del(&domain->avail_dir_pq_pairs, &queue->domain_list);
> -
> -		dlb_list_add(&domain->used_dir_pq_pairs, &queue->domain_list);
> -	}
> -
> -	resp->status = 0;
> -
> -	resp->id = queue->id;
> -
> -	return 0;
> -}
> -
> -static void dlb_log_create_ldb_port_args(struct dlb_hw *hw,
> -					 u32 domain_id,
> -					 u64 pop_count_dma_base,
> -					 u64 cq_dma_base,
> -					 struct dlb_create_ldb_port_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB create load-balanced port arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID:                 %d\n",
> -		    domain_id);
> -	DLB_HW_INFO(hw, "\tLDB credit pool ID:        %d\n",
> -		    args->ldb_credit_pool_id);
> -	DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
> -		    args->ldb_credit_high_watermark);
> -	DLB_HW_INFO(hw, "\tLDB credit low watermark:  %d\n",
> -		    args->ldb_credit_low_watermark);
> -	DLB_HW_INFO(hw, "\tLDB credit quantum:        %d\n",
> -		    args->ldb_credit_quantum);
> -	DLB_HW_INFO(hw, "\tDIR credit pool ID:        %d\n",
> -		    args->dir_credit_pool_id);
> -	DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
> -		    args->dir_credit_high_watermark);
> -	DLB_HW_INFO(hw, "\tDIR credit low watermark:  %d\n",
> -		    args->dir_credit_low_watermark);
> -	DLB_HW_INFO(hw, "\tDIR credit quantum:        %d\n",
> -		    args->dir_credit_quantum);
> -	DLB_HW_INFO(hw, "\tpop_count_address:         0x%"PRIx64"\n",
> -		    pop_count_dma_base);
> -	DLB_HW_INFO(hw, "\tCQ depth:                  %d\n",
> -		    args->cq_depth);
> -	DLB_HW_INFO(hw, "\tCQ hist list size:         %d\n",
> -		    args->cq_history_list_size);
> -	DLB_HW_INFO(hw, "\tCQ base address:           0x%"PRIx64"\n",
> -		    cq_dma_base);
> -}
> -
> -static struct dlb_credit_pool *
> -dlb_get_domain_ldb_pool(u32 id, struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	struct dlb_credit_pool *pool;
> -	RTE_SET_USED(iter);
> -
> -	if (id >= DLB_MAX_NUM_LDB_CREDIT_POOLS)
> -		return NULL;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
> -		if (pool->id == id)
> -			return pool;
> -
> -	return NULL;
> -}
> -
> -static struct dlb_credit_pool *
> -dlb_get_domain_dir_pool(u32 id, struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	struct dlb_credit_pool *pool;
> -	RTE_SET_USED(iter);
> -
> -	if (id >= DLB_MAX_NUM_DIR_CREDIT_POOLS)
> -		return NULL;
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
> -		if (pool->id == id)
> -			return pool;
> -
> -	return NULL;
> -}
> -
> -static int
> -dlb_verify_create_ldb_port_args(struct dlb_hw *hw,
> -				u32 domain_id,
> -				u64 pop_count_dma_base,
> -				u64 cq_dma_base,
> -				struct dlb_create_ldb_port_args *args,
> -				struct dlb_cmd_response *resp)
> -{
> -	struct dlb_domain *domain;
> -	struct dlb_credit_pool *pool;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	if (domain->started) {
> -		resp->status = DLB_ST_DOMAIN_STARTED;
> -		return -1;
> -	}
> -
> -	if (dlb_list_empty(&domain->avail_ldb_ports)) {
> -		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	/* If the scheduling domain has no LDB queues, we configure the
> -	 * hardware to not supply the port with any LDB credits. In that
> -	 * case, ignore the LDB credit arguments.
> -	 */
> -	if (!dlb_list_empty(&domain->used_ldb_queues) ||
> -	    !dlb_list_empty(&domain->avail_ldb_queues)) {
> -		pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
> -					       domain);
> -
> -		if (pool  == NULL || !pool->configured ||
> -		    pool->domain_id != domain->id) {
> -			resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
> -			return -1;
> -		}
> -
> -		if (args->ldb_credit_high_watermark > pool->avail_credits) {
> -			resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
> -			return -1;
> -		}
> -
> -		if (args->ldb_credit_low_watermark >=
> -		    args->ldb_credit_high_watermark) {
> -			resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
> -			return -1;
> -		}
> -
> -		if (args->ldb_credit_quantum >=
> -		    args->ldb_credit_high_watermark) {
> -			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
> -			return -1;
> -		}
> -
> -		if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
> -			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
> -			return -1;
> -		}
> -	}
> -
> -	/* Likewise, if the scheduling domain has no DIR queues, we configure
> -	 * the hardware to not supply the port with any DIR credits. In that
> -	 * case, ignore the DIR credit arguments.
> -	 */
> -	if (!dlb_list_empty(&domain->used_dir_pq_pairs) ||
> -	    !dlb_list_empty(&domain->avail_dir_pq_pairs)) {
> -		pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
> -					       domain);
> -
> -		if (pool  == NULL || !pool->configured ||
> -		    pool->domain_id != domain->id) {
> -			resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
> -			return -1;
> -		}
> -
> -		if (args->dir_credit_high_watermark > pool->avail_credits) {
> -			resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
> -			return -1;
> -		}
> -
> -		if (args->dir_credit_low_watermark >=
> -		    args->dir_credit_high_watermark) {
> -			resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
> -			return -1;
> -		}
> -
> -		if (args->dir_credit_quantum >=
> -		    args->dir_credit_high_watermark) {
> -			resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
> -			return -1;
> -		}
> -
> -		if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
> -			resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
> -			return -1;
> -		}
> -	}
> -
> -	/* Check cache-line alignment */
> -	if ((pop_count_dma_base & 0x3F) != 0) {
> -		resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
> -		return -1;
> -	}
> -
> -	if ((cq_dma_base & 0x3F) != 0) {
> -		resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
> -		return -1;
> -	}
> -
> -	if (args->cq_depth != 1 &&
> -	    args->cq_depth != 2 &&
> -	    args->cq_depth != 4 &&
> -	    args->cq_depth != 8 &&
> -	    args->cq_depth != 16 &&
> -	    args->cq_depth != 32 &&
> -	    args->cq_depth != 64 &&
> -	    args->cq_depth != 128 &&
> -	    args->cq_depth != 256 &&
> -	    args->cq_depth != 512 &&
> -	    args->cq_depth != 1024) {
> -		resp->status = DLB_ST_INVALID_CQ_DEPTH;
> -		return -1;
> -	}
> -
> -	/* The history list size must be >= 1 */
> -	if (!args->cq_history_list_size) {
> -		resp->status = DLB_ST_INVALID_HIST_LIST_DEPTH;
> -		return -1;
> -	}
> -
> -	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
> -		resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static void dlb_ldb_pool_update_credit_count(struct dlb_hw *hw,
> -					     u32 pool_id,
> -					     u32 count)
> -{
> -	hw->rsrcs.ldb_credit_pools[pool_id].avail_credits -= count;
> -}
> -
> -static void dlb_dir_pool_update_credit_count(struct dlb_hw *hw,
> -					     u32 pool_id,
> -					     u32 count)
> -{
> -	hw->rsrcs.dir_credit_pools[pool_id].avail_credits -= count;
> -}
> -
> -static int dlb_ldb_port_configure_pp(struct dlb_hw *hw,
> -				     struct dlb_domain *domain,
> -				     struct dlb_ldb_port *port,
> -				     struct dlb_create_ldb_port_args *args)
> -{
> -	union dlb_sys_ldb_pp2ldbpool r0 = { {0} };
> -	union dlb_sys_ldb_pp2dirpool r1 = { {0} };
> -	union dlb_sys_ldb_pp2vf_pf r2 = { {0} };
> -	union dlb_sys_ldb_pp2vas r3 = { {0} };
> -	union dlb_sys_ldb_pp_v r4 = { {0} };
> -	union dlb_chp_ldb_pp_ldb_crd_hwm r6 = { {0} };
> -	union dlb_chp_ldb_pp_dir_crd_hwm r7 = { {0} };
> -	union dlb_chp_ldb_pp_ldb_crd_lwm r8 = { {0} };
> -	union dlb_chp_ldb_pp_dir_crd_lwm r9 = { {0} };
> -	union dlb_chp_ldb_pp_ldb_min_crd_qnt r10 = { {0} };
> -	union dlb_chp_ldb_pp_dir_min_crd_qnt r11 = { {0} };
> -	union dlb_chp_ldb_pp_ldb_crd_cnt r12 = { {0} };
> -	union dlb_chp_ldb_pp_dir_crd_cnt r13 = { {0} };
> -	union dlb_chp_ldb_ldb_pp2pool r14 = { {0} };
> -	union dlb_chp_ldb_dir_pp2pool r15 = { {0} };
> -	union dlb_chp_ldb_pp_crd_req_state r16 = { {0} };
> -	union dlb_chp_ldb_pp_ldb_push_ptr r17 = { {0} };
> -	union dlb_chp_ldb_pp_dir_push_ptr r18 = { {0} };
> -
> -	struct dlb_credit_pool *ldb_pool = NULL;
> -	struct dlb_credit_pool *dir_pool = NULL;
> -
> -	if (port->ldb_pool_used) {
> -		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
> -						   domain);
> -		if (ldb_pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: port validation failed\n",
> -				   __func__);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	if (port->dir_pool_used) {
> -		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
> -						   domain);
> -		if (dir_pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: port validation failed\n",
> -				   __func__);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2LDBPOOL(port->id), r0.val);
> -
> -	r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2DIRPOOL(port->id), r1.val);
> -
> -	r2.field.is_pf = 1;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VF_PF(port->id), r2.val);
> -
> -	r3.field.vas = domain->id;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VAS(port->id), r3.val);
> -
> -	r6.field.hwm = args->ldb_credit_high_watermark;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id), r6.val);
> -
> -	r7.field.hwm = args->dir_credit_high_watermark;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id), r7.val);
> -
> -	r8.field.lwm = args->ldb_credit_low_watermark;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id), r8.val);
> -
> -	r9.field.lwm = args->dir_credit_low_watermark;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id), r9.val);
> -
> -	r10.field.quanta = args->ldb_credit_quantum;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
> -		   r10.val);
> -
> -	r11.field.quanta = args->dir_credit_quantum;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
> -		   r11.val);
> -
> -	r12.field.count = args->ldb_credit_high_watermark;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_CNT(port->id), r12.val);
> -
> -	r13.field.count = args->dir_credit_high_watermark;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_CNT(port->id), r13.val);
> -
> -	r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_LDB_PP2POOL(port->id), r14.val);
> -
> -	r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_DIR_PP2POOL(port->id), r15.val);
> -
> -	r16.field.no_pp_credit_update = 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id), r16.val);
> -
> -	r17.field.push_pointer = 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id), r17.val);
> -
> -	r18.field.push_pointer = 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id), r18.val);
> -
> -	r4.field.pp_v = 1;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_PP_V(port->id),
> -		   r4.val);
> -
> -	return 0;
> -}
> -
> -static int dlb_ldb_port_configure_cq(struct dlb_hw *hw,
> -				     struct dlb_ldb_port *port,
> -				     u64 pop_count_dma_base,
> -				     u64 cq_dma_base,
> -				     struct dlb_create_ldb_port_args *args)
> -{
> -	int i;
> -
> -	union dlb_sys_ldb_cq_addr_l r0 = { {0} };
> -	union dlb_sys_ldb_cq_addr_u r1 = { {0} };
> -	union dlb_sys_ldb_cq2vf_pf r2 = { {0} };
> -	union dlb_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
> -	union dlb_chp_hist_list_lim r4 = { {0} };
> -	union dlb_chp_hist_list_base r5 = { {0} };
> -	union dlb_lsp_cq_ldb_infl_lim r6 = { {0} };
> -	union dlb_lsp_cq2priov r7 = { {0} };
> -	union dlb_chp_hist_list_push_ptr r8 = { {0} };
> -	union dlb_chp_hist_list_pop_ptr r9 = { {0} };
> -	union dlb_lsp_cq_ldb_tkn_depth_sel r10 = { {0} };
> -	union dlb_sys_ldb_pp_addr_l r11 = { {0} };
> -	union dlb_sys_ldb_pp_addr_u r12 = { {0} };
> -
> -	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
> -	r0.field.addr_l = cq_dma_base >> 6;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_CQ_ADDR_L(port->id),
> -		   r0.val);
> -
> -	r1.field.addr_u = cq_dma_base >> 32;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_CQ_ADDR_U(port->id),
> -		   r1.val);
> -
> -	r2.field.is_pf = 1;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_LDB_CQ2VF_PF(port->id),
> -		   r2.val);
> -
> -	if (args->cq_depth <= 8) {
> -		r3.field.token_depth_select = 1;
> -	} else if (args->cq_depth == 16) {
> -		r3.field.token_depth_select = 2;
> -	} else if (args->cq_depth == 32) {
> -		r3.field.token_depth_select = 3;
> -	} else if (args->cq_depth == 64) {
> -		r3.field.token_depth_select = 4;
> -	} else if (args->cq_depth == 128) {
> -		r3.field.token_depth_select = 5;
> -	} else if (args->cq_depth == 256) {
> -		r3.field.token_depth_select = 6;
> -	} else if (args->cq_depth == 512) {
> -		r3.field.token_depth_select = 7;
> -	} else if (args->cq_depth == 1024) {
> -		r3.field.token_depth_select = 8;
> -	} else {
> -		DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
> -		   r3.val);
> -
> -	r10.field.token_depth_select = r3.field.token_depth_select;
> -	r10.field.ignore_depth = 0;
> -	/* TDT algorithm: DLB must be able to write CQs with depth < 4 */
> -	r10.field.enab_shallow_cq = 1;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
> -		   r10.val);
> -
> -	/* To support CQs with depth less than 8, program the token count
> -	 * register with a non-zero initial value. Operations such as domain
> -	 * reset must take this initial value into account when quiescing the
> -	 * CQ.
> -	 */
> -	port->init_tkn_cnt = 0;
> -
> -	if (args->cq_depth < 8) {
> -		union dlb_lsp_cq_ldb_tkn_cnt r12 = { {0} };
> -
> -		port->init_tkn_cnt = 8 - args->cq_depth;
> -
> -		r12.field.token_count = port->init_tkn_cnt;
> -
> -		DLB_CSR_WR(hw,
> -			   DLB_LSP_CQ_LDB_TKN_CNT(port->id),
> -			   r12.val);
> -	}
> -
> -	r4.field.limit = port->hist_list_entry_limit - 1;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_LIM(port->id), r4.val);
> -
> -	r5.field.base = port->hist_list_entry_base;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_BASE(port->id), r5.val);
> -
> -	r8.field.push_ptr = r5.field.base;
> -	r8.field.generation = 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_PUSH_PTR(port->id), r8.val);
> -
> -	r9.field.pop_ptr = r5.field.base;
> -	r9.field.generation = 0;
> -
> -	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_POP_PTR(port->id), r9.val);
> -
> -	/* The inflight limit sets a cap on the number of QEs for which this CQ
> -	 * can owe completions at one time.
> -	 */
> -	r6.field.limit = args->cq_history_list_size;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_INFL_LIM(port->id), r6.val);
> -
> -	/* Disable the port's QID mappings */
> -	r7.field.v = 0;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r7.val);
> -
> -	/* Two cache lines (128B) are dedicated for the port's pop counts */
> -	r11.field.addr_l = pop_count_dma_base >> 7;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_L(port->id), r11.val);
> -
> -	r12.field.addr_u = pop_count_dma_base >> 32;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_U(port->id), r12.val);
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
> -		port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
> -
> -	return 0;
> -}
> -
> -static void dlb_update_ldb_arb_threshold(struct dlb_hw *hw)
> -{
> -	union dlb_lsp_ctrl_config_0 r0 = { {0} };
> -
> -	/* From the hardware spec:
> -	 * "The optimal value for ldb_arb_threshold is in the region of {8 *
> -	 * #CQs}. It is expected therefore that the PF will change this value
> -	 * dynamically as the number of active ports changes."
> -	 */
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_CTRL_CONFIG_0);
> -
> -	r0.field.ldb_arb_threshold = hw->pf.num_enabled_ldb_ports * 8;
> -	r0.field.ldb_arb_ignore_empty = 1;
> -	r0.field.ldb_arb_mode = 1;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CTRL_CONFIG_0, r0.val);
> -
> -	dlb_flush_csr(hw);
> -}
> -
> -static int dlb_configure_ldb_port(struct dlb_hw *hw,
> -				  struct dlb_domain *domain,
> -				  struct dlb_ldb_port *port,
> -				  u64 pop_count_dma_base,
> -				  u64 cq_dma_base,
> -				  struct dlb_create_ldb_port_args *args)
> -{
> -	struct dlb_credit_pool *ldb_pool, *dir_pool;
> -	int ret;
> -
> -	port->hist_list_entry_base = domain->hist_list_entry_base +
> -				     domain->hist_list_entry_offset;
> -	port->hist_list_entry_limit = port->hist_list_entry_base +
> -				      args->cq_history_list_size;
> -
> -	domain->hist_list_entry_offset += args->cq_history_list_size;
> -	domain->avail_hist_list_entries -= args->cq_history_list_size;
> -
> -	port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
> -			      !dlb_list_empty(&domain->avail_ldb_queues);
> -	port->dir_pool_used = !dlb_list_empty(&domain->used_dir_pq_pairs) ||
> -			      !dlb_list_empty(&domain->avail_dir_pq_pairs);
> -
> -	if (port->ldb_pool_used) {
> -		u32 cnt = args->ldb_credit_high_watermark;
> -
> -		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
> -						   domain);
> -		if (ldb_pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: port validation failed\n",
> -				   __func__);
> -			return -EFAULT;
> -		}
> -
> -		dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
> -	} else {
> -		args->ldb_credit_high_watermark = 0;
> -		args->ldb_credit_low_watermark = 0;
> -		args->ldb_credit_quantum = 0;
> -	}
> -
> -	if (port->dir_pool_used) {
> -		u32 cnt = args->dir_credit_high_watermark;
> -
> -		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
> -						   domain);
> -		if (dir_pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: port validation failed\n",
> -				   __func__);
> -			return -EFAULT;
> -		}
> -
> -		dlb_dir_pool_update_credit_count(hw, dir_pool->id, cnt);
> -	} else {
> -		args->dir_credit_high_watermark = 0;
> -		args->dir_credit_low_watermark = 0;
> -		args->dir_credit_quantum = 0;
> -	}
> -
> -	ret = dlb_ldb_port_configure_cq(hw,
> -					port,
> -					pop_count_dma_base,
> -					cq_dma_base,
> -					args);
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_ldb_port_configure_pp(hw, domain, port, args);
> -	if (ret < 0)
> -		return ret;
> -
> -	dlb_ldb_port_cq_enable(hw, port);
> -
> -	port->num_mappings = 0;
> -
> -	port->enabled = true;
> -
> -	hw->pf.num_enabled_ldb_ports++;
> -
> -	dlb_update_ldb_arb_threshold(hw);
> -
> -	port->configured = true;
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
> - *	its resources.
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
> - * satisfy a request, resp->status will be set accordingly.
> - */
> -int dlb_hw_create_ldb_port(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_create_ldb_port_args *args,
> -			   u64 pop_count_dma_base,
> -			   u64 cq_dma_base,
> -			   struct dlb_cmd_response *resp)
> -{
> -	struct dlb_ldb_port *port;
> -	struct dlb_domain *domain;
> -	int ret;
> -
> -	dlb_log_create_ldb_port_args(hw,
> -				     domain_id,
> -				     pop_count_dma_base,
> -				     cq_dma_base,
> -				     args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	if (dlb_verify_create_ldb_port_args(hw,
> -					    domain_id,
> -					    pop_count_dma_base,
> -					    cq_dma_base,
> -					    args,
> -					    resp))
> -		return -EINVAL;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	port = DLB_DOM_LIST_HEAD(domain->avail_ldb_ports, typeof(*port));
> -
> -	/* Verification should catch this. */
> -	if (port == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no available ldb ports\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	if (port->configured) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	ret = dlb_configure_ldb_port(hw,
> -				     domain,
> -				     port,
> -				     pop_count_dma_base,
> -				     cq_dma_base,
> -				     args);
> -	if (ret < 0)
> -		return ret;
> -
> -	/* Configuration succeeded, so move the resource from the 'avail' to
> -	 * the 'used' list.
> -	 */
> -	dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);
> -
> -	dlb_list_add(&domain->used_ldb_ports, &port->domain_list);
> -
> -	resp->status = 0;
> -	resp->id = port->id;
> -
> -	return 0;
> -}
> -
> -static void dlb_log_create_dir_port_args(struct dlb_hw *hw,
> -					 u32 domain_id,
> -					 u64 pop_count_dma_base,
> -					 u64 cq_dma_base,
> -					 struct dlb_create_dir_port_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB create directed port arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID:                 %d\n",
> -		    domain_id);
> -	DLB_HW_INFO(hw, "\tLDB credit pool ID:        %d\n",
> -		    args->ldb_credit_pool_id);
> -	DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
> -		    args->ldb_credit_high_watermark);
> -	DLB_HW_INFO(hw, "\tLDB credit low watermark:  %d\n",
> -		    args->ldb_credit_low_watermark);
> -	DLB_HW_INFO(hw, "\tLDB credit quantum:        %d\n",
> -		    args->ldb_credit_quantum);
> -	DLB_HW_INFO(hw, "\tDIR credit pool ID:        %d\n",
> -		    args->dir_credit_pool_id);
> -	DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
> -		    args->dir_credit_high_watermark);
> -	DLB_HW_INFO(hw, "\tDIR credit low watermark:  %d\n",
> -		    args->dir_credit_low_watermark);
> -	DLB_HW_INFO(hw, "\tDIR credit quantum:        %d\n",
> -		    args->dir_credit_quantum);
> -	DLB_HW_INFO(hw, "\tpop_count_address:         0x%"PRIx64"\n",
> -		    pop_count_dma_base);
> -	DLB_HW_INFO(hw, "\tCQ depth:                  %d\n",
> -		    args->cq_depth);
> -	DLB_HW_INFO(hw, "\tCQ base address:           0x%"PRIx64"\n",
> -		    cq_dma_base);
> -}
> -
> -static int
> -dlb_verify_create_dir_port_args(struct dlb_hw *hw,
> -				u32 domain_id,
> -				u64 pop_count_dma_base,
> -				u64 cq_dma_base,
> -				struct dlb_create_dir_port_args *args,
> -				struct dlb_cmd_response *resp)
> -{
> -	struct dlb_domain *domain;
> -	struct dlb_credit_pool *pool;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	if (domain->started) {
> -		resp->status = DLB_ST_DOMAIN_STARTED;
> -		return -1;
> -	}
> -
> -	/* If the user claims the queue is already configured, validate
> -	 * the queue ID, its domain, and whether the queue is configured.
> -	 */
> -	if (args->queue_id != -1) {
> -		struct dlb_dir_pq_pair *queue;
> -
> -		queue = dlb_get_domain_used_dir_pq(args->queue_id,
> -						   domain);
> -
> -		if (queue  == NULL || queue->domain_id != domain->id ||
> -		    !queue->queue_configured) {
> -			resp->status = DLB_ST_INVALID_DIR_QUEUE_ID;
> -			return -1;
> -		}
> -	}
> -
> -	/* If the port's queue is not configured, validate that a free
> -	 * port-queue pair is available.
> -	 */
> -	if (args->queue_id == -1 &&
> -	    dlb_list_empty(&domain->avail_dir_pq_pairs)) {
> -		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	/* If the scheduling domain has no LDB queues, we configure the
> -	 * hardware to not supply the port with any LDB credits. In that
> -	 * case, ignore the LDB credit arguments.
> -	 */
> -	if (!dlb_list_empty(&domain->used_ldb_queues) ||
> -	    !dlb_list_empty(&domain->avail_ldb_queues)) {
> -		pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
> -					       domain);
> -
> -		if (pool  == NULL || !pool->configured ||
> -		    pool->domain_id != domain->id) {
> -			resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
> -			return -1;
> -		}
> -
> -		if (args->ldb_credit_high_watermark > pool->avail_credits) {
> -			resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
> -			return -1;
> -		}
> -
> -		if (args->ldb_credit_low_watermark >=
> -		    args->ldb_credit_high_watermark) {
> -			resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
> -			return -1;
> -		}
> -
> -		if (args->ldb_credit_quantum >=
> -		    args->ldb_credit_high_watermark) {
> -			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
> -			return -1;
> -		}
> -
> -		if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
> -			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
> -			return -1;
> -		}
> -	}
> -
> -	pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
> -				       domain);
> -
> -	if (pool  == NULL || !pool->configured ||
> -	    pool->domain_id != domain->id) {
> -		resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
> -		return -1;
> -	}
> -
> -	if (args->dir_credit_high_watermark > pool->avail_credits) {
> -		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
> -		return -1;
> -	}
> -
> -	if (args->dir_credit_low_watermark >= args->dir_credit_high_watermark) {
> -		resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
> -		return -1;
> -	}
> -
> -	if (args->dir_credit_quantum >= args->dir_credit_high_watermark) {
> -		resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
> -		return -1;
> -	}
> -
> -	if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
> -		resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
> -		return -1;
> -	}
> -
> -	/* Check cache-line alignment */
> -	if ((pop_count_dma_base & 0x3F) != 0) {
> -		resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
> -		return -1;
> -	}
> -
> -	if ((cq_dma_base & 0x3F) != 0) {
> -		resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
> -		return -1;
> -	}
> -
> -	if (args->cq_depth != 8 &&
> -	    args->cq_depth != 16 &&
> -	    args->cq_depth != 32 &&
> -	    args->cq_depth != 64 &&
> -	    args->cq_depth != 128 &&
> -	    args->cq_depth != 256 &&
> -	    args->cq_depth != 512 &&
> -	    args->cq_depth != 1024) {
> -		resp->status = DLB_ST_INVALID_CQ_DEPTH;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static int dlb_dir_port_configure_pp(struct dlb_hw *hw,
> -				     struct dlb_domain *domain,
> -				     struct dlb_dir_pq_pair *port,
> -				     struct dlb_create_dir_port_args *args)
> -{
> -	union dlb_sys_dir_pp2ldbpool r0 = { {0} };
> -	union dlb_sys_dir_pp2dirpool r1 = { {0} };
> -	union dlb_sys_dir_pp2vf_pf r2 = { {0} };
> -	union dlb_sys_dir_pp2vas r3 = { {0} };
> -	union dlb_sys_dir_pp_v r4 = { {0} };
> -	union dlb_chp_dir_pp_ldb_crd_hwm r6 = { {0} };
> -	union dlb_chp_dir_pp_dir_crd_hwm r7 = { {0} };
> -	union dlb_chp_dir_pp_ldb_crd_lwm r8 = { {0} };
> -	union dlb_chp_dir_pp_dir_crd_lwm r9 = { {0} };
> -	union dlb_chp_dir_pp_ldb_min_crd_qnt r10 = { {0} };
> -	union dlb_chp_dir_pp_dir_min_crd_qnt r11 = { {0} };
> -	union dlb_chp_dir_pp_ldb_crd_cnt r12 = { {0} };
> -	union dlb_chp_dir_pp_dir_crd_cnt r13 = { {0} };
> -	union dlb_chp_dir_ldb_pp2pool r14 = { {0} };
> -	union dlb_chp_dir_dir_pp2pool r15 = { {0} };
> -	union dlb_chp_dir_pp_crd_req_state r16 = { {0} };
> -	union dlb_chp_dir_pp_ldb_push_ptr r17 = { {0} };
> -	union dlb_chp_dir_pp_dir_push_ptr r18 = { {0} };
> -
> -	struct dlb_credit_pool *ldb_pool = NULL;
> -	struct dlb_credit_pool *dir_pool = NULL;
> -
> -	if (port->ldb_pool_used) {
> -		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
> -						   domain);
> -		if (ldb_pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: port validation failed\n",
> -				   __func__);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	if (port->dir_pool_used) {
> -		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
> -						   domain);
> -		if (dir_pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: port validation failed\n",
> -				   __func__);
> -			return -EFAULT;
> -		}
> -	}
> -
> -	r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP2LDBPOOL(port->id),
> -		   r0.val);
> -
> -	r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP2DIRPOOL(port->id),
> -		   r1.val);
> -
> -	r2.field.is_pf = 1;
> -	r2.field.is_hw_dsi = 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP2VF_PF(port->id),
> -		   r2.val);
> -
> -	r3.field.vas = domain->id;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_SYS_DIR_PP2VAS(port->id),
> -		   r3.val);
> -
> -	r6.field.hwm = args->ldb_credit_high_watermark;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
> -		   r6.val);
> -
> -	r7.field.hwm = args->dir_credit_high_watermark;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
> -		   r7.val);
> -
> -	r8.field.lwm = args->ldb_credit_low_watermark;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
> -		   r8.val);
> -
> -	r9.field.lwm = args->dir_credit_low_watermark;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
> -		   r9.val);
> -
> -	r10.field.quanta = args->ldb_credit_quantum;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
> -		   r10.val);
> -
> -	r11.field.quanta = args->dir_credit_quantum;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
> -		   r11.val);
> -
> -	r12.field.count = args->ldb_credit_high_watermark;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_CRD_CNT(port->id),
> -		   r12.val);
> -
> -	r13.field.count = args->dir_credit_high_watermark;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_CRD_CNT(port->id),
> -		   r13.val);
> -
> -	r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_LDB_PP2POOL(port->id),
> -		   r14.val);
> -
> -	r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_DIR_PP2POOL(port->id),
> -		   r15.val);
> -
> -	r16.field.no_pp_credit_update = 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
> -		   r16.val);
> -
> -	r17.field.push_pointer = 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
> -		   r17.val);
> -
> -	r18.field.push_pointer = 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
> -		   r18.val);
> -
> -	r4.field.pp_v = 1;
> -	r4.field.mb_dm = 0;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_V(port->id), r4.val);
> -
> -	return 0;
> -}
> -
> -static int dlb_dir_port_configure_cq(struct dlb_hw *hw,
> -				     struct dlb_dir_pq_pair *port,
> -				     u64 pop_count_dma_base,
> -				     u64 cq_dma_base,
> -				     struct dlb_create_dir_port_args *args)
> -{
> -	union dlb_sys_dir_cq_addr_l r0 = { {0} };
> -	union dlb_sys_dir_cq_addr_u r1 = { {0} };
> -	union dlb_sys_dir_cq2vf_pf r2 = { {0} };
> -	union dlb_chp_dir_cq_tkn_depth_sel r3 = { {0} };
> -	union dlb_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
> -	union dlb_sys_dir_pp_addr_l r5 = { {0} };
> -	union dlb_sys_dir_pp_addr_u r6 = { {0} };
> -
> -	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
> -	r0.field.addr_l = cq_dma_base >> 6;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_L(port->id), r0.val);
> -
> -	r1.field.addr_u = cq_dma_base >> 32;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_U(port->id), r1.val);
> -
> -	r2.field.is_pf = 1;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ2VF_PF(port->id), r2.val);
> -
> -	if (args->cq_depth == 8) {
> -		r3.field.token_depth_select = 1;
> -	} else if (args->cq_depth == 16) {
> -		r3.field.token_depth_select = 2;
> -	} else if (args->cq_depth == 32) {
> -		r3.field.token_depth_select = 3;
> -	} else if (args->cq_depth == 64) {
> -		r3.field.token_depth_select = 4;
> -	} else if (args->cq_depth == 128) {
> -		r3.field.token_depth_select = 5;
> -	} else if (args->cq_depth == 256) {
> -		r3.field.token_depth_select = 6;
> -	} else if (args->cq_depth == 512) {
> -		r3.field.token_depth_select = 7;
> -	} else if (args->cq_depth == 1024) {
> -		r3.field.token_depth_select = 8;
> -	} else {
> -		DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
> -		   r3.val);
> -
> -	r4.field.token_depth_select = r3.field.token_depth_select;
> -	r4.field.disable_wb_opt = 0;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
> -		   r4.val);
> -
> -	/* Two cache lines (128B) are dedicated for the port's pop counts */
> -	r5.field.addr_l = pop_count_dma_base >> 7;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_L(port->id), r5.val);
> -
> -	r6.field.addr_u = pop_count_dma_base >> 32;
> -
> -	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_U(port->id), r6.val);
> -
> -	return 0;
> -}
> -
> -static int dlb_configure_dir_port(struct dlb_hw *hw,
> -				  struct dlb_domain *domain,
> -				  struct dlb_dir_pq_pair *port,
> -				  u64 pop_count_dma_base,
> -				  u64 cq_dma_base,
> -				  struct dlb_create_dir_port_args *args)
> -{
> -	struct dlb_credit_pool *ldb_pool, *dir_pool;
> -	int ret;
> -
> -	port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
> -			      !dlb_list_empty(&domain->avail_ldb_queues);
> -
> -	/* Each directed port has a directed queue, hence this port requires
> -	 * directed credits.
> -	 */
> -	port->dir_pool_used = true;
> -
> -	if (port->ldb_pool_used) {
> -		u32 cnt = args->ldb_credit_high_watermark;
> -
> -		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
> -						   domain);
> -		if (ldb_pool == NULL) {
> -			DLB_HW_ERR(hw,
> -				   "[%s()] Internal error: port validation failed\n",
> -				   __func__);
> -			return -EFAULT;
> -		}
> -
> -		dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
> -	} else {
> -		args->ldb_credit_high_watermark = 0;
> -		args->ldb_credit_low_watermark = 0;
> -		args->ldb_credit_quantum = 0;
> -	}
> -
> -	dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, domain);
> -	if (dir_pool == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s()] Internal error: port validation failed\n",
> -			   __func__);
> -		return -EFAULT;
> -	}
> -
> -	dlb_dir_pool_update_credit_count(hw,
> -					 dir_pool->id,
> -					 args->dir_credit_high_watermark);
> -
> -	ret = dlb_dir_port_configure_cq(hw,
> -					port,
> -					pop_count_dma_base,
> -					cq_dma_base,
> -					args);
> -
> -	if (ret < 0)
> -		return ret;
> -
> -	ret = dlb_dir_port_configure_pp(hw, domain, port, args);
> -	if (ret < 0)
> -		return ret;
> -
> -	dlb_dir_port_cq_enable(hw, port);
> -
> -	port->enabled = true;
> -
> -	port->port_configured = true;
> -
> -	return 0;
> -}
> -
> -/**
> - * dlb_hw_create_dir_port() - Allocate and initialize a DLB directed port and
> - *	queue. The port/queue pair have the same ID and name.
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
> - * satisfy a request, resp->status will be set accordingly.
> - */
> -int dlb_hw_create_dir_port(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_create_dir_port_args *args,
> -			   u64 pop_count_dma_base,
> -			   u64 cq_dma_base,
> -			   struct dlb_cmd_response *resp)
> -{
> -	struct dlb_dir_pq_pair *port;
> -	struct dlb_domain *domain;
> -	int ret;
> -
> -	dlb_log_create_dir_port_args(hw,
> -				     domain_id,
> -				     pop_count_dma_base,
> -				     cq_dma_base,
> -				     args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	if (dlb_verify_create_dir_port_args(hw,
> -					    domain_id,
> -					    pop_count_dma_base,
> -					    cq_dma_base,
> -					    args,
> -					    resp))
> -		return -EINVAL;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	if (args->queue_id != -1)
> -		port = dlb_get_domain_used_dir_pq(args->queue_id,
> -						  domain);
> -	else
> -		port = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
> -					 typeof(*port));
> -
> -	/* Verification should catch this. */
> -	if (port == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no available dir ports\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	ret = dlb_configure_dir_port(hw,
> -				     domain,
> -				     port,
> -				     pop_count_dma_base,
> -				     cq_dma_base,
> -				     args);
> -	if (ret < 0)
> -		return ret;
> -
> -	/* Configuration succeeded, so move the resource from the 'avail' to
> -	 * the 'used' list (if it's not already there).
> -	 */
> -	if (args->queue_id == -1) {
> -		dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
> -
> -		dlb_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
> -	}
> -
> -	resp->status = 0;
> -	resp->id = port->id;
> -
> -	return 0;
> -}
> -
> -static struct dlb_ldb_port *
> -dlb_get_domain_used_ldb_port(u32 id, struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	struct dlb_ldb_port *port;
> -	RTE_SET_USED(iter);
> -
> -	if (id >= DLB_MAX_NUM_LDB_PORTS)
> -		return NULL;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
> -		if (port->id == id)
> -			return port;
> -
> -	DLB_DOM_LIST_FOR(domain->avail_ldb_ports, port, iter)
> -		if (port->id == id)
> -			return port;
> -
> -	return NULL;
> -}
> -
> -static void
> -dlb_log_pending_port_unmaps_args(struct dlb_hw *hw,
> -				 struct dlb_pending_port_unmaps_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB pending port unmaps arguments:\n");
> -	DLB_HW_INFO(hw, "\tPort ID: %d\n", args->port_id);
> -}
> -
> -int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
> -			       u32 domain_id,
> -			       struct dlb_pending_port_unmaps_args *args,
> -			       struct dlb_cmd_response *resp)
> -{
> -	struct dlb_domain *domain;
> -	struct dlb_ldb_port *port;
> -
> -	dlb_log_pending_port_unmaps_args(hw, args);
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -EINVAL;
> -	}
> -
> -	port = dlb_get_domain_used_ldb_port(args->port_id, domain);
> -	if (port == NULL || !port->configured) {
> -		resp->status = DLB_ST_INVALID_PORT_ID;
> -		return -EINVAL;
> -	}
> -
> -	resp->id = port->num_pending_removals;
> -
> -	return 0;
> -}
> -
> -static void dlb_log_unmap_qid(struct dlb_hw *hw,
> -			      u32 domain_id,
> -			      struct dlb_unmap_qid_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB unmap QID arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID: %d\n",
> -		    domain_id);
> -	DLB_HW_INFO(hw, "\tPort ID:   %d\n",
> -		    args->port_id);
> -	DLB_HW_INFO(hw, "\tQueue ID:  %d\n",
> -		    args->qid);
> -	if (args->qid < DLB_MAX_NUM_LDB_QUEUES)
> -		DLB_HW_INFO(hw, "\tQueue's num mappings:  %d\n",
> -			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
> -}
> -
> -static struct dlb_ldb_queue *dlb_get_domain_ldb_queue(u32 id,
> -						      struct dlb_domain *domain)
> -{
> -	struct dlb_list_entry *iter;
> -	struct dlb_ldb_queue *queue;
> -	RTE_SET_USED(iter);
> -
> -	if (id >= DLB_MAX_NUM_LDB_QUEUES)
> -		return NULL;
> -
> -	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
> -		if (queue->id == id)
> -			return queue;
> -
> -	return NULL;
> -}
> -
> -static bool
> -dlb_port_find_slot_with_pending_map_queue(struct dlb_ldb_port *port,
> -					  struct dlb_ldb_queue *queue,
> -					  int *slot)
> -{
> -	int i;
> -
> -	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
> -		struct dlb_ldb_port_qid_map *map = &port->qid_map[i];
> -
> -		if (map->state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP &&
> -		    map->pending_qid == queue->id)
> -			break;
> -	}
> -
> -	*slot = i;
> -
> -	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
> -}
> -
> -static int dlb_verify_unmap_qid_args(struct dlb_hw *hw,
> -				     u32 domain_id,
> -				     struct dlb_unmap_qid_args *args,
> -				     struct dlb_cmd_response *resp)
> -{
> -	enum dlb_qid_map_state state;
> -	struct dlb_domain *domain;
> -	struct dlb_ldb_port *port;
> -	struct dlb_ldb_queue *queue;
> -	int slot;
> -	int id;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	id = args->port_id;
> -
> -	port = dlb_get_domain_used_ldb_port(id, domain);
> -
> -	if (port == NULL || !port->configured) {
> -		resp->status = DLB_ST_INVALID_PORT_ID;
> -		return -1;
> -	}
> -
> -	if (port->domain_id != domain->id) {
> -		resp->status = DLB_ST_INVALID_PORT_ID;
> -		return -1;
> -	}
> -
> -	queue = dlb_get_domain_ldb_queue(args->qid, domain);
> -
> -	if (queue == NULL || !queue->configured) {
> -		DLB_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
> -			   __func__, args->qid);
> -		resp->status = DLB_ST_INVALID_QID;
> -		return -1;
> -	}
> -
> -	/* Verify that the port has the queue mapped. From the application's
> -	 * perspective a queue is mapped if it is actually mapped, the map is
> -	 * in progress, or the map is blocked pending an unmap.
> -	 */
> -	state = DLB_QUEUE_MAPPED;
> -	if (dlb_port_find_slot_queue(port, state, queue, &slot))
> -		return 0;
> -
> -	state = DLB_QUEUE_MAP_IN_PROGRESS;
> -	if (dlb_port_find_slot_queue(port, state, queue, &slot))
> -		return 0;
> -
> -	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &slot))
> -		return 0;
> -
> -	resp->status = DLB_ST_INVALID_QID;
> -	return -1;
> -}
> -
> -int dlb_hw_unmap_qid(struct dlb_hw *hw,
> -		     u32 domain_id,
> -		     struct dlb_unmap_qid_args *args,
> -		     struct dlb_cmd_response *resp)
> -{
> -	enum dlb_qid_map_state state;
> -	struct dlb_ldb_queue *queue;
> -	struct dlb_ldb_port *port;
> -	struct dlb_domain *domain;
> -	bool unmap_complete;
> -	int i, ret, id;
> -
> -	dlb_log_unmap_qid(hw, domain_id, args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	if (dlb_verify_unmap_qid_args(hw, domain_id, args, resp))
> -		return -EINVAL;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	id = args->port_id;
> -
> -	port = dlb_get_domain_used_ldb_port(id, domain);
> -	if (port == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: port not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	queue = dlb_get_domain_ldb_queue(args->qid, domain);
> -	if (queue == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: queue not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	/* If the queue hasn't been mapped yet, we need to update the slot's
> -	 * state and re-enable the queue's inflights.
> -	 */
> -	state = DLB_QUEUE_MAP_IN_PROGRESS;
> -	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
> -		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -			DLB_HW_ERR(hw,
> -				   "[%s():%d] Internal error: port slot tracking failed\n",
> -				   __func__, __LINE__);
> -			return -EFAULT;
> -		}
> -
> -		/* Since the in-progress map was aborted, re-enable the QID's
> -		 * inflights.
> -		 */
> -		if (queue->num_pending_additions == 0)
> -			dlb_ldb_queue_set_inflight_limit(hw, queue);
> -
> -		state = DLB_QUEUE_UNMAPPED;
> -		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
> -		if (ret)
> -			return ret;
> -
> -		goto unmap_qid_done;
> -	}
> -
> -	/* If the queue mapping is on hold pending an unmap, we simply need to
> -	 * update the slot's state.
> -	 */
> -	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
> -		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -			DLB_HW_ERR(hw,
> -				   "[%s():%d] Internal error: port slot tracking failed\n",
> -				   __func__, __LINE__);
> -			return -EFAULT;
> -		}
> -
> -		state = DLB_QUEUE_UNMAP_IN_PROGRESS;
> -		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
> -		if (ret)
> -			return ret;
> -
> -		goto unmap_qid_done;
> -	}
> -
> -	state = DLB_QUEUE_MAPPED;
> -	if (!dlb_port_find_slot_queue(port, state, queue, &i)) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: no available CQ slots\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: port slot tracking failed\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	/* QID->CQ mapping removal is an asynchronous procedure. It requires
> -	 * stopping the DLB from scheduling this CQ, draining all inflights
> -	 * from the CQ, then unmapping the queue from the CQ. This function
> -	 * simply marks the port as needing the queue unmapped, and (if
> -	 * necessary) starts the unmapping worker thread.
> -	 */
> -	dlb_ldb_port_cq_disable(hw, port);
> -
> -	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
> -	ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
> -	if (ret)
> -		return ret;
> -
> -	/* Attempt to finish the unmapping now, in case the port has no
> -	 * outstanding inflights. If that's not the case, this will fail and
> -	 * the unmapping will be completed at a later time.
> -	 */
> -	unmap_complete = dlb_domain_finish_unmap_port(hw, domain, port);
> -
> -	/* If the unmapping couldn't complete immediately, launch the worker
> -	 * thread (if it isn't already launched) to finish it later.
> -	 */
> -	if (!unmap_complete && !os_worker_active(hw))
> -		os_schedule_work(hw);
> -
> -unmap_qid_done:
> -	resp->status = 0;
> -
> -	return 0;
> -}
> -
> -static void dlb_log_map_qid(struct dlb_hw *hw,
> -			    u32 domain_id,
> -			    struct dlb_map_qid_args *args)
> -{
> -	DLB_HW_INFO(hw, "DLB map QID arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
> -	DLB_HW_INFO(hw, "\tPort ID:   %d\n", args->port_id);
> -	DLB_HW_INFO(hw, "\tQueue ID:  %d\n", args->qid);
> -	DLB_HW_INFO(hw, "\tPriority:  %d\n", args->priority);
> -}
> -
> -static int dlb_verify_map_qid_args(struct dlb_hw *hw,
> -				   u32 domain_id,
> -				   struct dlb_map_qid_args *args,
> -				   struct dlb_cmd_response *resp)
> -{
> -	struct dlb_domain *domain;
> -	struct dlb_ldb_port *port;
> -	struct dlb_ldb_queue *queue;
> -	int id;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	id = args->port_id;
> -
> -	port = dlb_get_domain_used_ldb_port(id, domain);
> -
> -	if (port  == NULL || !port->configured) {
> -		resp->status = DLB_ST_INVALID_PORT_ID;
> -		return -1;
> -	}
> -
> -	if (args->priority >= DLB_QID_PRIORITIES) {
> -		resp->status = DLB_ST_INVALID_PRIORITY;
> -		return -1;
> -	}
> -
> -	queue = dlb_get_domain_ldb_queue(args->qid, domain);
> -
> -	if (queue  == NULL || !queue->configured) {
> -		resp->status = DLB_ST_INVALID_QID;
> -		return -1;
> -	}
> -
> -	if (queue->domain_id != domain->id) {
> -		resp->status = DLB_ST_INVALID_QID;
> -		return -1;
> -	}
> -
> -	if (port->domain_id != domain->id) {
> -		resp->status = DLB_ST_INVALID_PORT_ID;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static int dlb_verify_start_domain_args(struct dlb_hw *hw,
> -					u32 domain_id,
> -					struct dlb_cmd_response *resp)
> -{
> -	struct dlb_domain *domain;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -1;
> -	}
> -
> -	if (!domain->configured) {
> -		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
> -		return -1;
> -	}
> -
> -	if (domain->started) {
> -		resp->status = DLB_ST_DOMAIN_STARTED;
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -static int dlb_verify_map_qid_slot_available(struct dlb_ldb_port *port,
> -					     struct dlb_ldb_queue *queue,
> -					     struct dlb_cmd_response *resp)
> -{
> -	enum dlb_qid_map_state state;
> -	int i;
> -
> -	/* Unused slot available? */
> -	if (port->num_mappings < DLB_MAX_NUM_QIDS_PER_LDB_CQ)
> -		return 0;
> -
> -	/* If the queue is already mapped (from the application's perspective),
> -	 * this is simply a priority update.
> -	 */
> -	state = DLB_QUEUE_MAPPED;
> -	if (dlb_port_find_slot_queue(port, state, queue, &i))
> -		return 0;
> -
> -	state = DLB_QUEUE_MAP_IN_PROGRESS;
> -	if (dlb_port_find_slot_queue(port, state, queue, &i))
> -		return 0;
> -
> -	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i))
> -		return 0;
> -
> -	/* If the slot contains an unmap in progress, it's considered
> -	 * available.
> -	 */
> -	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
> -	if (dlb_port_find_slot(port, state, &i))
> -		return 0;
> -
> -	state = DLB_QUEUE_UNMAPPED;
> -	if (dlb_port_find_slot(port, state, &i))
> -		return 0;
> -
> -	resp->status = DLB_ST_NO_QID_SLOTS_AVAILABLE;
> -	return -EINVAL;
> -}
> -
> -static void dlb_ldb_port_change_qid_priority(struct dlb_hw *hw,
> -					     struct dlb_ldb_port *port,
> -					     int slot,
> -					     struct dlb_map_qid_args *args)
> -{
> -	union dlb_lsp_cq2priov r0;
> -
> -	/* Read-modify-write the priority and valid bit register */
> -	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port->id));
> -
> -	r0.field.v |= 1 << slot;
> -	r0.field.prio |= (args->priority & 0x7) << slot * 3;
> -
> -	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r0.val);
> -
> -	dlb_flush_csr(hw);
> -
> -	port->qid_map[slot].priority = args->priority;
> -}
> -
> -int dlb_hw_map_qid(struct dlb_hw *hw,
> -		   u32 domain_id,
> -		   struct dlb_map_qid_args *args,
> -		   struct dlb_cmd_response *resp)
> -{
> -	enum dlb_qid_map_state state;
> -	struct dlb_ldb_queue *queue;
> -	struct dlb_ldb_port *port;
> -	struct dlb_domain *domain;
> -	int ret, i, id;
> -	u8 prio;
> -
> -	dlb_log_map_qid(hw, domain_id, args);
> -
> -	/* Verify that hardware resources are available before attempting to
> -	 * satisfy the request. This simplifies the error unwinding code.
> -	 */
> -	if (dlb_verify_map_qid_args(hw, domain_id, args, resp))
> -		return -EINVAL;
> -
> -	prio = args->priority;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	id = args->port_id;
> -
> -	port = dlb_get_domain_used_ldb_port(id, domain);
> -	if (port == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: port not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	queue = dlb_get_domain_ldb_queue(args->qid, domain);
> -	if (queue == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: queue not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	/* If there are any outstanding detach operations for this port,
> -	 * attempt to complete them. This may be necessary to free up a QID
> -	 * slot for this requested mapping.
> -	 */
> -	if (port->num_pending_removals)
> -		dlb_domain_finish_unmap_port(hw, domain, port);
> -
> -	ret = dlb_verify_map_qid_slot_available(port, queue, resp);
> -	if (ret)
> -		return ret;
> -
> -	/* Hardware requires disabling the CQ before mapping QIDs. */
> -	if (port->enabled)
> -		dlb_ldb_port_cq_disable(hw, port);
> -
> -	/* If this is only a priority change, don't perform the full QID->CQ
> -	 * mapping procedure
> -	 */
> -	state = DLB_QUEUE_MAPPED;
> -	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
> -		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -			DLB_HW_ERR(hw,
> -				   "[%s():%d] Internal error: port slot tracking failed\n",
> -				   __func__, __LINE__);
> -			return -EFAULT;
> -		}
> -
> -		if (prio != port->qid_map[i].priority) {
> -			dlb_ldb_port_change_qid_priority(hw, port, i, args);
> -			DLB_HW_INFO(hw, "DLB map: priority change only\n");
> -		}
> -
> -		state = DLB_QUEUE_MAPPED;
> -		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
> -		if (ret)
> -			return ret;
> -
> -		goto map_qid_done;
> -	}
> -
> -	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
> -	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
> -		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -			DLB_HW_ERR(hw,
> -				   "[%s():%d] Internal error: port slot tracking failed\n",
> -				   __func__, __LINE__);
> -			return -EFAULT;
> -		}
> -
> -		if (prio != port->qid_map[i].priority) {
> -			dlb_ldb_port_change_qid_priority(hw, port, i, args);
> -			DLB_HW_INFO(hw, "DLB map: priority change only\n");
> -		}
> -
> -		state = DLB_QUEUE_MAPPED;
> -		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
> -		if (ret)
> -			return ret;
> -
> -		goto map_qid_done;
> -	}
> -
> -	/* If this is a priority change on an in-progress mapping, don't
> -	 * perform the full QID->CQ mapping procedure.
> -	 */
> -	state = DLB_QUEUE_MAP_IN_PROGRESS;
> -	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
> -		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -			DLB_HW_ERR(hw,
> -				   "[%s():%d] Internal error: port slot tracking failed\n",
> -				   __func__, __LINE__);
> -			return -EFAULT;
> -		}
> -
> -		port->qid_map[i].priority = prio;
> -
> -		DLB_HW_INFO(hw, "DLB map: priority change only\n");
> -
> -		goto map_qid_done;
> -	}
> -
> -	/* If this is a priority change on a pending mapping, update the
> -	 * pending priority
> -	 */
> -	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
> -		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -			DLB_HW_ERR(hw,
> -				   "[%s():%d] Internal error: port slot tracking failed\n",
> -				   __func__, __LINE__);
> -			return -EFAULT;
> -		}
> -
> -		port->qid_map[i].pending_priority = prio;
> -
> -		DLB_HW_INFO(hw, "DLB map: priority change only\n");
> -
> -		goto map_qid_done;
> -	}
> -
> -	/* If all the CQ's slots are in use, then there's an unmap in progress
> -	 * (guaranteed by dlb_verify_map_qid_slot_available()), so add this
> -	 * mapping to pending_map and return. When the removal is completed for
> -	 * the slot's current occupant, this mapping will be performed.
> -	 */
> -	if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &i)) {
> -		if (dlb_port_find_slot(port, DLB_QUEUE_UNMAP_IN_PROGRESS, &i)) {
> -			enum dlb_qid_map_state state;
> -
> -			if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
> -				DLB_HW_ERR(hw,
> -					   "[%s():%d] Internal error: port slot tracking failed\n",
> -					   __func__, __LINE__);
> -				return -EFAULT;
> -			}
> -
> -			port->qid_map[i].pending_qid = queue->id;
> -			port->qid_map[i].pending_priority = prio;
> -
> -			state = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;
> -
> -			ret = dlb_port_slot_state_transition(hw, port, queue,
> -							     i, state);
> -			if (ret)
> -				return ret;
> -
> -			DLB_HW_INFO(hw, "DLB map: map pending removal\n");
> -
> -			goto map_qid_done;
> -		}
> -	}
> -
> -	/* If the domain has started, a special "dynamic" CQ->queue mapping
> -	 * procedure is required in order to safely update the CQ<->QID tables.
> -	 * The "static" procedure cannot be used when traffic is flowing,
> -	 * because the CQ<->QID tables cannot be updated atomically and the
> -	 * scheduler won't see the new mapping unless the queue's if_status
> -	 * changes, which isn't guaranteed.
> -	 */
> -	ret = dlb_ldb_port_map_qid(hw, domain, port, queue, prio);
> -
> -	/* If ret is less than zero, it's due to an internal error */
> -	if (ret < 0)
> -		return ret;
> -
> -map_qid_done:
> -	if (port->enabled)
> -		dlb_ldb_port_cq_enable(hw, port);
> -
> -	resp->status = 0;
> -
> -	return 0;
> -}
> -
> -static void dlb_log_start_domain(struct dlb_hw *hw, u32 domain_id)
> -{
> -	DLB_HW_INFO(hw, "DLB start domain arguments:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
> -}
> -
> -static void dlb_ldb_pool_write_credit_count_reg(struct dlb_hw *hw,
> -						u32 pool_id)
> -{
> -	union dlb_chp_ldb_pool_crd_cnt r0 = { {0} };
> -	struct dlb_credit_pool *pool;
> -
> -	pool = &hw->rsrcs.ldb_credit_pools[pool_id];
> -
> -	r0.field.count = pool->avail_credits;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
> -		   r0.val);
> -}
> -
> -static void dlb_dir_pool_write_credit_count_reg(struct dlb_hw *hw,
> -						u32 pool_id)
> -{
> -	union dlb_chp_dir_pool_crd_cnt r0 = { {0} };
> -	struct dlb_credit_pool *pool;
> -
> -	pool = &hw->rsrcs.dir_credit_pools[pool_id];
> -
> -	r0.field.count = pool->avail_credits;
> -
> -	DLB_CSR_WR(hw,
> -		   DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
> -		   r0.val);
> -}
> -
> -/**
> - * dlb_hw_start_domain() - Lock the domain configuration
> - * @hw:	  Contains the current state of the DLB hardware.
> - * @args: User-provided arguments.
> - * @resp: Response to user.
> - *
> - * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
> - * satisfy a request, resp->status will be set accordingly.
> - */
> -int dlb_hw_start_domain(struct dlb_hw *hw,
> -			u32 domain_id,
> -			struct dlb_start_domain_args *arg,
> -			struct dlb_cmd_response *resp)
> -{
> -	struct dlb_list_entry *iter;
> -	struct dlb_dir_pq_pair *dir_queue;
> -	struct dlb_ldb_queue *ldb_queue;
> -	struct dlb_credit_pool *pool;
> -	struct dlb_domain *domain;
> -	RTE_SET_USED(arg);
> -	RTE_SET_USED(iter);
> -
> -	dlb_log_start_domain(hw, domain_id);
> -
> -	if (dlb_verify_start_domain_args(hw, domain_id, resp))
> -		return -EINVAL;
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		DLB_HW_ERR(hw,
> -			   "[%s():%d] Internal error: domain not found\n",
> -			   __func__, __LINE__);
> -		return -EFAULT;
> -	}
> -
> -	/* Write the domain's pool credit counts, which have been updated
> -	 * during port configuration. The sum of the pool credit count plus
> -	 * each producer port's credit count must equal the pool's credit
> -	 * allocation *before* traffic is sent.
> -	 */
> -	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
> -		dlb_ldb_pool_write_credit_count_reg(hw, pool->id);
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
> -		dlb_dir_pool_write_credit_count_reg(hw, pool->id);
> -
> -	/* Enable load-balanced and directed queue write permissions for the
> -	 * queues this domain owns. Without this, the DLB will drop all
> -	 * incoming traffic to those queues.
> -	 */
> -	DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
> -		union dlb_sys_ldb_vasqid_v r0 = { {0} };
> -		unsigned int offs;
> -
> -		r0.field.vasqid_v = 1;
> -
> -		offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + ldb_queue->id;
> -
> -		DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
> -	}
> -
> -	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
> -		union dlb_sys_dir_vasqid_v r0 = { {0} };
> -		unsigned int offs;
> -
> -		r0.field.vasqid_v = 1;
> -
> -		offs = domain->id * DLB_MAX_NUM_DIR_PORTS + dir_queue->id;
> -
> -		DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
> -	}
> -
> -	dlb_flush_csr(hw);
> -
> -	domain->started = true;
> -
> -	resp->status = 0;
> -
> -	return 0;
> -}
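
Side note for anyone reading the removed base code: dlb_hw_start_domain() is the
last configuration step for a domain. A minimal sketch of the PF-side bring-up
order implied by these routines (error handling omitted; the *_args contents
are placeholders, not real settings):

    struct dlb_cmd_response resp = {0};
    struct dlb_create_sched_domain_args dom_args = { /* resource counts */ };
    struct dlb_start_domain_args start_args = {0};

    /* 1. Create the domain and configure its pools, queues and ports. */
    dlb_hw_create_sched_domain(hw, &dom_args, &resp);
    /* ... dlb_hw_create_ldb_pool(), dlb_hw_create_ldb_queue(), ... */

    /* 2. Start the domain; afterwards only QID remap and port
     * enable/disable may still change.
     */
    dlb_hw_start_domain(hw, resp.id, &start_args, &resp);
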
> -
> -static void dlb_log_get_dir_queue_depth(struct dlb_hw *hw,
> -					u32 domain_id,
> -					u32 queue_id)
> -{
> -	DLB_HW_INFO(hw, "DLB get directed queue depth:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
> -	DLB_HW_INFO(hw, "\tQueue ID: %d\n", queue_id);
> -}
> -
> -int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
> -			       u32 domain_id,
> -			       struct dlb_get_dir_queue_depth_args *args,
> -			       struct dlb_cmd_response *resp)
> -{
> -	struct dlb_dir_pq_pair *queue;
> -	struct dlb_domain *domain;
> -	int id;
> -
> -	id = domain_id;
> -
> -	dlb_log_get_dir_queue_depth(hw, domain_id, args->queue_id);
> -
> -	domain = dlb_get_domain_from_id(hw, id);
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -EINVAL;
> -	}
> -
> -	id = args->queue_id;
> -
> -	queue = dlb_get_domain_used_dir_pq(args->queue_id, domain);
> -	if (queue == NULL) {
> -		resp->status = DLB_ST_INVALID_QID;
> -		return -EINVAL;
> -	}
> -
> -	resp->id = dlb_dir_queue_depth(hw, queue);
> -
> -	return 0;
> -}
> -
> -static void dlb_log_get_ldb_queue_depth(struct dlb_hw *hw,
> -					u32 domain_id,
> -					u32 queue_id)
> -{
> -	DLB_HW_INFO(hw, "DLB get load-balanced queue depth:\n");
> -	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
> -	DLB_HW_INFO(hw, "\tQueue ID: %d\n", queue_id);
> -}
> -
> -int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
> -			       u32 domain_id,
> -			       struct dlb_get_ldb_queue_depth_args *args,
> -			       struct dlb_cmd_response *resp)
> -{
> -	union dlb_lsp_qid_aqed_active_cnt r0;
> -	union dlb_lsp_qid_atq_enqueue_cnt r1;
> -	union dlb_lsp_qid_ldb_enqueue_cnt r2;
> -	struct dlb_ldb_queue *queue;
> -	struct dlb_domain *domain;
> -
> -	dlb_log_get_ldb_queue_depth(hw, domain_id, args->queue_id);
> -
> -	domain = dlb_get_domain_from_id(hw, domain_id);
> -	if (domain == NULL) {
> -		resp->status = DLB_ST_INVALID_DOMAIN_ID;
> -		return -EINVAL;
> -	}
> -
> -	queue = dlb_get_domain_ldb_queue(args->queue_id, domain);
> -	if (queue == NULL) {
> -		resp->status = DLB_ST_INVALID_QID;
> -		return -EINVAL;
> -	}
> -
> -	r0.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
> -
> -	r1.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));
> -
> -	r2.val = DLB_CSR_RD(hw,
> -			    DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
> -
> -	resp->id = r0.val + r1.val + r2.val;
> -
> -	return 0;
> -}
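
Note: the load-balanced depth reported above is the sum of three hardware
counters (AQED active, ATQ enqueue and LDB enqueue counts), returned to the
caller in resp->id. A minimal caller sketch, with hw/domain_id/qid as
placeholder variables:

    struct dlb_get_ldb_queue_depth_args args = { .queue_id = qid };
    struct dlb_cmd_response resp = {0};

    if (dlb_hw_get_ldb_queue_depth(hw, domain_id, &args, &resp) == 0)
        printf("LDB queue %u depth: %u\n", qid, resp.id);
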
> diff --git a/drivers/event/dlb/pf/base/dlb_resource.h b/drivers/event/dlb/pf/base/dlb_resource.h
> deleted file mode 100644
> index 4f48b73fd..000000000
> --- a/drivers/event/dlb/pf/base/dlb_resource.h
> +++ /dev/null
> @@ -1,876 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_RESOURCE_H
> -#define __DLB_RESOURCE_H
> -
> -#include "dlb_hw_types.h"
> -#include "dlb_osdep_types.h"
> -
> -/**
> - * dlb_resource_init() - initialize the device
> - * @hw: pointer to struct dlb_hw.
> - *
> - * This function initializes the device's software state (pointed to by the hw
> - * argument) and programs global scheduling QoS registers. This function should
> - * be called during driver initialization.
> - *
> - * The dlb_hw struct must be unique per DLB device and persist until the device
> - * is reset.
> - *
> - * Return:
> - * Returns 0 upon success, -1 otherwise.
> - */
> -int dlb_resource_init(struct dlb_hw *hw);
> -
> -/**
> - * dlb_resource_free() - free device state memory
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function frees software state pointed to by dlb_hw. This function
> - * should be called when resetting the device or unloading the driver.
> - */
> -void dlb_resource_free(struct dlb_hw *hw);
> -
> -/**
> - * dlb_resource_reset() - reset in-use resources to their initial state
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function resets in-use resources, and makes them available for use.
> - */
> -void dlb_resource_reset(struct dlb_hw *hw);
> -
> -/**
> - * dlb_hw_create_sched_domain() - create a scheduling domain
> - * @hw: dlb_hw handle for a particular device.
> - * @args: scheduling domain creation arguments.
> - * @resp: response structure.
> - *
> - * This function creates a scheduling domain containing the resources specified
> - * in args. The individual resources (queues, ports, credit pools) can be
> - * configured after creating a scheduling domain.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the domain ID.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, or the requested domain name
> - *	    is already in use.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_create_sched_domain(struct dlb_hw *hw,
> -			       struct dlb_create_sched_domain_args *args,
> -			       struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_create_ldb_pool() - create a load-balanced credit pool
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: credit pool creation arguments.
> - * @resp: response structure.
> - *
> - * This function creates a load-balanced credit pool containing the number of
> - * requested credits.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the pool ID.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, the domain is not configured,
> - *	    or the domain has already been started.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_create_ldb_pool_args *args,
> -			   struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_create_dir_pool() - create a directed credit pool
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: credit pool creation arguments.
> - * @resp: response structure.
> - *
> - * This function creates a directed credit pool containing the number of
> - * requested credits.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the pool ID.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, the domain is not configured,
> - *	    or the domain has already been started.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_create_dir_pool(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_create_dir_pool_args *args,
> -			   struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_create_ldb_queue() - create a load-balanced queue
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: queue creation arguments.
> - * @resp: response structure.
> - *
> - * This function creates a load-balanced queue.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the queue ID.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, the domain is not configured,
> - *	    the domain has already been started, or the requested queue name is
> - *	    already in use.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
> -			    u32 domain_id,
> -			    struct dlb_create_ldb_queue_args *args,
> -			    struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_create_dir_queue() - create a directed queue
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: queue creation arguments.
> - * @resp: response structure.
> - *
> - * This function creates a directed queue.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the queue ID.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, the domain is not configured,
> - *	    or the domain has already been started.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_create_dir_queue(struct dlb_hw *hw,
> -			    u32 domain_id,
> -			    struct dlb_create_dir_queue_args *args,
> -			    struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_create_dir_port() - create a directed port
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: port creation arguments.
> - * @pop_count_dma_base: base address of the pop count memory. This can be
> - *			a PA or an IOVA.
> - * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
> - * @resp: response structure.
> - *
> - * This function creates a directed port.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the port ID.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
> - *	    pool ID is invalid, a pointer address is not properly aligned, the
> - *	    domain is not configured, or the domain has already been started.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_create_dir_port(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_create_dir_port_args *args,
> -			   u64 pop_count_dma_base,
> -			   u64 cq_dma_base,
> -			   struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_create_ldb_port() - create a load-balanced port
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: port creation arguments.
> - * @pop_count_dma_base: base address of the pop count memory. This can be
> - *			 a PA or an IOVA.
> - * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
> - * @resp: response structure.
> - *
> - * This function creates a load-balanced port.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the port ID.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
> - *	    pool ID is invalid, a pointer address is not properly aligned, the
> - *	    domain is not configured, or the domain has already been started.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_create_ldb_port(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_create_ldb_port_args *args,
> -			   u64 pop_count_dma_base,
> -			   u64 cq_dma_base,
> -			   struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_start_domain() - start a scheduling domain
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: start domain arguments.
> - * @resp: response structure.
> - *
> - * This function starts a scheduling domain, which allows applications to send
> - * traffic through it. Once a domain is started, its resources can no longer be
> - * configured (besides QID remapping and port enable/disable).
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error.
> - *
> - * Errors:
> - * EINVAL - the domain is not configured, or the domain is already started.
> - */
> -int dlb_hw_start_domain(struct dlb_hw *hw,
> -			u32 domain_id,
> -			struct dlb_start_domain_args *args,
> -			struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_map_qid() - map a load-balanced queue to a load-balanced port
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: map QID arguments.
> - * @resp: response structure.
> - *
> - * This function configures the DLB to schedule QEs from the specified queue to
> - * the specified port. Each load-balanced port can be mapped to up to 8 queues;
> - * each load-balanced queue can potentially map to all the load-balanced ports.
> - *
> - * A successful return does not necessarily mean the mapping was configured. If
> - * this function is unable to immediately map the queue to the port, it will
> - * add the requested operation to a per-port list of pending map/unmap
> - * operations, and (if it's not already running) launch a kernel thread that
> - * periodically attempts to process all pending operations. In a sense, this is
> - * an asynchronous function.
> - *
> - * This asynchronicity creates two views of the state of hardware: the actual
> - * hardware state and the requested state (as if every request completed
> - * immediately). If there are any pending map/unmap operations, the requested
> - * state will differ from the actual state. All validation is performed with
> - * respect to the pending state; for instance, if there are 8 pending map
> - * operations for port X, a request for a 9th will fail because a load-balanced
> - * port can only map up to 8 queues.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
> - *	    the domain is not configured.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_map_qid(struct dlb_hw *hw,
> -		   u32 domain_id,
> -		   struct dlb_map_qid_args *args,
> -		   struct dlb_cmd_response *resp);
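
(A short sketch of driving the asynchronous map path described above, assuming
the caller runs the deferred work itself rather than relying on a separate
thread; the map_args contents are placeholders:)

    struct dlb_map_qid_args map_args = { /* port ID, qid, priority */ };
    struct dlb_cmd_response resp = {0};

    if (dlb_hw_map_qid(hw, domain_id, &map_args, &resp) == 0) {
        /* Retry until no deferred map operations remain. */
        while (dlb_finish_map_qid_procedures(hw) != 0)
            ;    /* or sleep between attempts */
    }
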
> -
> -/**
> - * dlb_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: unmap QID arguments.
> - * @resp: response structure.
> - *
> - * This function configures the DLB to stop scheduling QEs from the specified
> - * queue to the specified port.
> - *
> - * A successful return does not necessarily mean the mapping was removed. If
> - * this function is unable to immediately unmap the queue from the port, it
> - * will add the requested operation to a per-port list of pending map/unmap
> - * operations, and (if it's not already running) launch a kernel thread that
> - * periodically attempts to process all pending operations. See
> - * dlb_hw_map_qid() for more details.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error.
> - *
> - * Errors:
> - * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
> - *	    the domain is not configured.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_unmap_qid(struct dlb_hw *hw,
> -		     u32 domain_id,
> -		     struct dlb_unmap_qid_args *args,
> -		     struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_finish_unmap_qid_procedures() - finish any pending unmap procedures
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function attempts to finish any outstanding unmap procedures.
> - * This function should be called by the kernel thread responsible for
> - * finishing map/unmap procedures.
> - *
> - * Return:
> - * Returns the number of procedures that weren't completed.
> - */
> -unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw);
> -
> -/**
> - * dlb_finish_map_qid_procedures() - finish any pending map procedures
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function attempts to finish any outstanding map procedures.
> - * This function should be called by the kernel thread responsible for
> - * finishing map/unmap procedures.
> - *
> - * Return:
> - * Returns the number of procedures that weren't completed.
> - */
> -unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw);
> -
> -/**
> - * dlb_hw_enable_ldb_port() - enable a load-balanced port for scheduling
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: port enable arguments.
> - * @resp: response structure.
> - *
> - * This function configures the DLB to schedule QEs to a load-balanced port.
> - * Ports are enabled by default.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error.
> - *
> - * Errors:
> - * EINVAL - The port ID is invalid or the domain is not configured.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_enable_ldb_port(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_enable_ldb_port_args *args,
> -			   struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_disable_ldb_port() - disable a load-balanced port for scheduling
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: port disable arguments.
> - * @resp: response structure.
> - *
> - * This function configures the DLB to stop scheduling QEs to a load-balanced
> - * port. Ports are enabled by default.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error.
> - *
> - * Errors:
> - * EINVAL - The port ID is invalid or the domain is not configured.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_disable_ldb_port(struct dlb_hw *hw,
> -			    u32 domain_id,
> -			    struct dlb_disable_ldb_port_args *args,
> -			    struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_enable_dir_port() - enable a directed port for scheduling
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: port enable arguments.
> - * @resp: response structure.
> - *
> - * This function configures the DLB to schedule QEs to a directed port.
> - * Ports are enabled by default.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error.
> - *
> - * Errors:
> - * EINVAL - The port ID is invalid or the domain is not configured.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_enable_dir_port(struct dlb_hw *hw,
> -			   u32 domain_id,
> -			   struct dlb_enable_dir_port_args *args,
> -			   struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_disable_dir_port() - disable a directed port for scheduling
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: port disable arguments.
> - * @resp: response structure.
> - *
> - * This function configures the DLB to stop scheduling QEs to a directed port.
> - * Ports are enabled by default.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error.
> - *
> - * Errors:
> - * EINVAL - The port ID is invalid or the domain is not configured.
> - * EFAULT - Internal error (resp->status not set).
> - */
> -int dlb_hw_disable_dir_port(struct dlb_hw *hw,
> -			    u32 domain_id,
> -			    struct dlb_disable_dir_port_args *args,
> -			    struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_configure_ldb_cq_interrupt() - configure load-balanced CQ for interrupts
> - * @hw: dlb_hw handle for a particular device.
> - * @port_id: load-balanced port ID.
> - * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
> - *	    else a value up to 64.
> - * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
> - * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
> - *	greater than 0.
> - *
> - * This function configures the DLB registers for load-balanced CQ's interrupts.
> - * This doesn't enable the CQ's interrupt; that can be done with
> - * dlb_arm_cq_interrupt() or through an interrupt arm QE.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - The port ID is invalid.
> - */
> -int dlb_configure_ldb_cq_interrupt(struct dlb_hw *hw,
> -				   int port_id,
> -				   int vector,
> -				   int mode,
> -				   u16 threshold);
> -
> -/**
> - * dlb_configure_dir_cq_interrupt() - configure directed CQ for interrupts
> - * @hw: dlb_hw handle for a particular device.
> - * @port_id: directed port ID.
> - * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
> - *	    else a value up to 64.
> - * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
> - * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
> - *	greater than 0.
> - *
> - * This function configures the DLB registers for directed CQ's interrupts.
> - * This doesn't enable the CQ's interrupt; that can be done with
> - * dlb_arm_cq_interrupt() or through an interrupt arm QE.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise.
> - *
> - * Errors:
> - * EINVAL - The port ID is invalid.
> - */
> -int dlb_configure_dir_cq_interrupt(struct dlb_hw *hw,
> -				   int port_id,
> -				   int vector,
> -				   int mode,
> -				   u16 threshold);
> -
> -/**
> - * dlb_enable_alarm_interrupts() - enable certain hardware alarm interrupts
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function configures the ingress error alarm. (Other alarms are enabled
> - * by default.)
> - */
> -void dlb_enable_alarm_interrupts(struct dlb_hw *hw);
> -
> -/**
> - * dlb_disable_alarm_interrupts() - disable certain hardware alarm interrupts
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function configures the ingress error alarm. (Other alarms are disabled
> - * by default.)
> - */
> -void dlb_disable_alarm_interrupts(struct dlb_hw *hw);
> -
> -/**
> - * dlb_set_msix_mode() - configure the hardware's MSI-X mode
> - * @hw: dlb_hw handle for a particular device.
> - * @mode: MSI-X mode (DLB_MSIX_MODE_PACKED or DLB_MSIX_MODE_COMPRESSED)
> - *
> - * This function configures the hardware to use either packed or compressed
> - * mode. This function should not be called if using MSI interrupts.
> - */
> -void dlb_set_msix_mode(struct dlb_hw *hw, int mode);
> -
> -/**
> - * dlb_arm_cq_interrupt() - arm a CQ's interrupt
> - * @hw: dlb_hw handle for a particular device.
> - * @port_id: port ID
> - * @is_ldb: true for load-balanced port, false for a directed port
> - *
> - * This function arms the CQ's interrupt. The CQ must be configured prior to
> - * calling this function.
> - *
> - * The function does no parameter validation; that is the caller's
> - * responsibility.
> - *
> - * Return: returns 0 upon success, <0 otherwise.
> - *
> - * EINVAL - Invalid port ID.
> - */
> -int dlb_arm_cq_interrupt(struct dlb_hw *hw, int port_id, bool is_ldb);
> -
> -/**
> - * dlb_read_compressed_cq_intr_status() - read compressed CQ interrupt status
> - * @hw: dlb_hw handle for a particular device.
> - * @ldb_interrupts: 2-entry array of u32 bitmaps
> - * @dir_interrupts: 4-entry array of u32 bitmaps
> - *
> - * This function can be called from a compressed CQ interrupt handler to
> - * determine which CQ interrupts have fired. The caller should take appropriate
> - * action (such as waking threads blocked on a CQ's interrupt), then ack the
> - * interrupts with dlb_ack_compressed_cq_intr().
> - */
> -void dlb_read_compressed_cq_intr_status(struct dlb_hw *hw,
> -					u32 *ldb_interrupts,
> -					u32 *dir_interrupts);
> -
> -/**
> - * dlb_ack_compressed_cq_intr() - ack compressed CQ interrupts
> - * @hw: dlb_hw handle for a particular device.
> - * @ldb_interrupts: 2-entry array of u32 bitmaps
> - * @dir_interrupts: 4-entry array of u32 bitmaps
> - *
> - * This function ACKs compressed CQ interrupts. Its arguments should be the
> - * same ones passed to dlb_read_compressed_cq_intr_status().
> - */
> -void dlb_ack_compressed_cq_intr(struct dlb_hw *hw,
> -				u32 *ldb_interrupts,
> -				u32 *dir_interrupts);
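
(A sketch of a compressed-mode handler built from the two calls above; the
wake_ldb_port()/wake_dir_port() helpers are hypothetical, and the loop bounds
follow from the 2 x 32-bit LDB and 4 x 32-bit DIR bitmaps:)

    u32 ldb_ints[2] = {0}, dir_ints[4] = {0};
    int i;

    dlb_read_compressed_cq_intr_status(hw, ldb_ints, dir_ints);

    for (i = 0; i < 64; i++)
        if (ldb_ints[i / 32] & (1U << (i % 32)))
            wake_ldb_port(i);
    for (i = 0; i < 128; i++)
        if (dir_ints[i / 32] & (1U << (i % 32)))
            wake_dir_port(i);

    dlb_ack_compressed_cq_intr(hw, ldb_ints, dir_ints);
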
> -
> -/**
> - * dlb_process_alarm_interrupt() - process an alarm interrupt
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function reads the alarm syndrome, logs it, and acks the interrupt.
> - * This function should be called from the alarm interrupt handler when
> - * interrupt vector DLB_INT_ALARM fires.
> - */
> -void dlb_process_alarm_interrupt(struct dlb_hw *hw);
> -
> -/**
> - * dlb_process_ingress_error_interrupt() - process ingress error interrupts
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function reads the alarm syndrome, logs it, notifies user-space, and
> - * acks the interrupt. This function should be called from the alarm interrupt
> - * handler when interrupt vector DLB_INT_INGRESS_ERROR fires.
> - */
> -void dlb_process_ingress_error_interrupt(struct dlb_hw *hw);
> -
> -/**
> - * dlb_get_group_sequence_numbers() - return a group's number of SNs per queue
> - * @hw: dlb_hw handle for a particular device.
> - * @group_id: sequence number group ID.
> - *
> - * This function returns the configured number of sequence numbers per queue
> - * for the specified group.
> - *
> - * Return:
> - * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
> - */
> -int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int
> group_id);
> -
> -/**
> - * dlb_get_group_sequence_number_occupancy() - return a group's in-use slots
> - * @hw: dlb_hw handle for a particular device.
> - * @group_id: sequence number group ID.
> - *
> - * This function returns the group's number of in-use slots (i.e. load-balanced
> - * queues using the specified group).
> - *
> - * Return:
> - * Returns -EINVAL if group_id is invalid, else the group's occupancy.
> - */
> -int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
> -					    unsigned int group_id);
> -
> -/**
> - * dlb_set_group_sequence_numbers() - assign a group's number of SNs per queue
> - * @hw: dlb_hw handle for a particular device.
> - * @group_id: sequence number group ID.
> - * @val: requested amount of sequence numbers per queue.
> - *
> - * This function configures the group's number of sequence numbers per queue.
> - * val can be a power-of-two between 32 and 1024, inclusive. This setting can
> - * be configured until the first ordered load-balanced queue is configured, at
> - * which point the configuration is locked.
> - *
> - * Return:
> - * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
> - * ordered queue is configured.
> - */
> -int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
> -				   unsigned int group_id,
> -				   unsigned long val);
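
(Illustration of the constraint above -- val must be a power of two in
[32, 1024] and must be set before any ordered queue is configured; hw and
group_id are placeholders:)

    unsigned long sns = 256;

    if (sns < 32 || sns > 1024 || (sns & (sns - 1)) != 0)
        return -EINVAL;    /* not a power of two in range */

    return dlb_set_group_sequence_numbers(hw, group_id, sns);
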
> -
> -/**
> - * dlb_reset_domain() - reset a scheduling domain
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - *
> - * This function resets and frees a DLB scheduling domain and its associated
> - * resources.
> - *
> - * Pre-condition: the driver must ensure software has stopped sending QEs
> - * through this domain's producer ports before invoking this function, or
> - * undefined behavior will result.
> - *
> - * Return:
> - * Returns 0 upon success, -1 otherwise.
> - *
> - * EINVAL - Invalid domain ID, or the domain is not configured.
> - * EFAULT - Internal error. (Possibly caused if the pre-condition
> - *	    is not met.)
> - * ETIMEDOUT - Hardware component didn't reset in the expected time.
> - */
> -int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id);
> -
> -/**
> - * dlb_ldb_port_owned_by_domain() - query whether a port is owned by a domain
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @port_id: port ID.
> - *
> - * This function returns whether a load-balanced port is owned by a specified
> - * domain.
> - *
> - * Return:
> - * Returns 0 if false, 1 if true, <0 otherwise.
> - *
> - * EINVAL - Invalid domain or port ID, or the domain is not configured.
> - */
> -int dlb_ldb_port_owned_by_domain(struct dlb_hw *hw,
> -				 u32 domain_id,
> -				 u32 port_id);
> -
> -/**
> - * dlb_dir_port_owned_by_domain() - query whether a port is owned by a domain
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @port_id: port ID.
> - *
> - * This function returns whether a directed port is owned by a specified
> - * domain.
> - *
> - * Return:
> - * Returns 0 if false, 1 if true, <0 otherwise.
> - *
> - * EINVAL - Invalid domain or port ID, or the domain is not configured.
> - */
> -int dlb_dir_port_owned_by_domain(struct dlb_hw *hw,
> -				 u32 domain_id,
> -				 u32 port_id);
> -
> -/**
> - * dlb_hw_get_num_resources() - query the PCI function's available resources
> - * @arg: pointer to resource counts.
> - *
> - * This function returns the number of available resources for the PF.
> - */
> -void dlb_hw_get_num_resources(struct dlb_hw *hw,
> -			      struct dlb_get_num_resources_args *arg);
> -
> -/**
> - * dlb_hw_get_num_used_resources() - query the PCI function's used resources
> - * @arg: pointer to resource counts.
> - *
> - * This function returns the number of resources in use by the PF. It fills in
> - * the fields that args points to, except the following:
> - * - max_contiguous_atomic_inflights
> - * - max_contiguous_hist_list_entries
> - * - max_contiguous_ldb_credits
> - * - max_contiguous_dir_credits
> - */
> -void dlb_hw_get_num_used_resources(struct dlb_hw *hw,
> -				   struct dlb_get_num_resources_args *arg);
> -
> -/**
> - * dlb_disable_dp_vasr_feature() - disable directed pipe VAS reset hardware
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function disables certain hardware in the directed pipe,
> - * necessary to workaround a DLB VAS reset issue.
> - */
> -void dlb_disable_dp_vasr_feature(struct dlb_hw *hw);
> -
> -/**
> - * dlb_enable_excess_tokens_alarm() - enable interrupts for the excess token
> - * pop alarm
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function enables the PF ingress error alarm interrupt to fire when an
> - * excess token pop occurs.
> - */
> -void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw);
> -
> -/**
> - * dlb_disable_excess_tokens_alarm() - disable interrupts for the excess token
> - * pop alarm
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function disables the PF ingress error alarm interrupt from firing when an
> - * excess token pop occurs.
> - */
> -void dlb_disable_excess_tokens_alarm(struct dlb_hw *hw);
> -
> -/**
> - * dlb_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: queue depth args
> - *
> - * This function returns the depth of a load-balanced queue.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the depth.
> - *
> - * Errors:
> - * EINVAL - Invalid domain ID or queue ID.
> - */
> -int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
> -			       u32 domain_id,
> -			       struct dlb_get_ldb_queue_depth_args *args,
> -			       struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_get_dir_queue_depth() - returns the depth of a directed queue
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: queue depth args
> - *
> - * This function returns the depth of a directed queue.
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the depth.
> - *
> - * Errors:
> - * EINVAL - Invalid domain ID or queue ID.
> - */
> -int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
> -			       u32 domain_id,
> -			       struct dlb_get_dir_queue_depth_args *args,
> -			       struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_pending_port_unmaps() - returns the number of unmap operations in
> - *	progress for a load-balanced port.
> - * @hw: dlb_hw handle for a particular device.
> - * @domain_id: domain ID.
> - * @args: number of unmaps in progress args
> - *
> - * Return:
> - * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> - * assigned a detailed error code from enum dlb_error. If successful, resp->id
> - * contains the number of unmaps in progress.
> - *
> - * Errors:
> - * EINVAL - Invalid port ID.
> - */
> -int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
> -			       u32 domain_id,
> -			       struct dlb_pending_port_unmaps_args *args,
> -			       struct dlb_cmd_response *resp);
> -
> -/**
> - * dlb_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
> - *	ports.
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function must be called prior to configuring scheduling domains.
> - */
> -void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw);
> -
> -/**
> - * dlb_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports
> - * @hw: dlb_hw handle for a particular device.
> - *
> - * This function must be called prior to configuring scheduling domains.
> - */
> -void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw);
> -
> -/**
> - * dlb_hw_set_qe_arbiter_weights() - program QE arbiter weights
> - * @hw: dlb_hw handle for a particular device.
> - * @weight: 8-entry array of arbiter weights.
> - *
> - * weight[N] programs priority N's weight. In cases where the 8 priorities are
> - * reduced to 4 bins, the mapping is:
> - * - weight[1] programs bin 0
> - * - weight[3] programs bin 1
> - * - weight[5] programs bin 2
> - * - weight[7] programs bin 3
> - */
> -void dlb_hw_set_qe_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
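
(Example of the 4-bin mapping described above: when the 8 priorities are folded
into 4 bins, only the odd-indexed entries take effect. The weight values here
are arbitrary:)

    u8 weight[8] = {0};

    weight[1] = 1;    /* bin 0 */
    weight[3] = 2;    /* bin 1 */
    weight[5] = 4;    /* bin 2 */
    weight[7] = 8;    /* bin 3 */

    dlb_hw_set_qe_arbiter_weights(hw, weight);
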
> -
> -/**
> - * dlb_hw_set_qid_arbiter_weights() - program QID arbiter weights
> - * @hw: dlb_hw handle for a particular device.
> - * @weight: 8-entry array of arbiter weights.
> - *
> - * weight[N] programs priority N's weight. In cases where the 8 priorities are
> - * reduced to 4 bins, the mapping is:
> - * - weight[1] programs bin 0
> - * - weight[3] programs bin 1
> - * - weight[5] programs bin 2
> - * - weight[7] programs bin 3
> - */
> -void dlb_hw_set_qid_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
> -
> -/**
> - * dlb_hw_enable_pp_sw_alarms() - enable out-of-credit alarm for all producer
> - * ports
> - * @hw: dlb_hw handle for a particular device.
> - */
> -void dlb_hw_enable_pp_sw_alarms(struct dlb_hw *hw);
> -
> -/**
> - * dlb_hw_disable_pp_sw_alarms() - disable out-of-credit alarm for all producer
> - * ports
> - * @hw: dlb_hw handle for a particular device.
> - */
> -void dlb_hw_disable_pp_sw_alarms(struct dlb_hw *hw);
> -
> -/**
> - * dlb_hw_disable_pf_to_vf_isr_pend_err() - disable alarm triggered by PF
> - *	access to VF's ISR pending register
> - * @hw: dlb_hw handle for a particular device.
> - */
> -void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw);
> -
> -/**
> - * dlb_hw_disable_vf_to_pf_isr_pend_err() - disable alarm triggered by VF
> - *	access to PF's ISR pending register
> - * @hw: dlb_hw handle for a particular device.
> - */
> -void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw);
> -
> -#endif /* __DLB_RESOURCE_H */
> diff --git a/drivers/event/dlb/pf/dlb_main.c b/drivers/event/dlb/pf/dlb_main.c
> deleted file mode 100644
> index 264350e28..000000000
> --- a/drivers/event/dlb/pf/dlb_main.c
> +++ /dev/null
> @@ -1,552 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#include <stdint.h>
> -#include <stdbool.h>
> -#include <stdio.h>
> -#include <errno.h>
> -#include <assert.h>
> -#include <unistd.h>
> -#include <string.h>
> -
> -#include <rte_malloc.h>
> -#include <rte_errno.h>
> -
> -#include "base/dlb_resource.h"
> -#include "base/dlb_osdep.h"
> -#include "base/dlb_regs.h"
> -#include "../dlb_priv.h"
> -#include "../dlb_inline_fns.h"
> -#include "../dlb_user.h"
> -#include "dlb_main.h"
> -
> -unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
> -
> -#define DLB_PCI_CAP_POINTER 0x34
> -#define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
> -#define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
> -#define DLB_PCI_ERR_UNCOR_MASK 8
> -#define DLB_PCI_ERR_UNC_UNSUP  0x00100000
> -
> -#define DLB_PCI_LNKCTL 16
> -#define DLB_PCI_SLTCTL 24
> -#define DLB_PCI_RTCTL 28
> -#define DLB_PCI_EXP_DEVCTL2 40
> -#define DLB_PCI_LNKCTL2 48
> -#define DLB_PCI_SLTCTL2 56
> -#define DLB_PCI_CMD 4
> -#define DLB_PCI_EXP_DEVSTA 10
> -#define DLB_PCI_EXP_DEVSTA_TRPND 0x20
> -#define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000
> -
> -#define DLB_PCI_CAP_ID_EXP       0x10
> -#define DLB_PCI_CAP_ID_MSIX      0x11
> -#define DLB_PCI_EXT_CAP_ID_PRI   0x13
> -#define DLB_PCI_EXT_CAP_ID_ACS   0xD
> -
> -#define DLB_PCI_PRI_CTRL_ENABLE         0x1
> -#define DLB_PCI_PRI_ALLOC_REQ           0xC
> -#define DLB_PCI_PRI_CTRL                0x4
> -#define DLB_PCI_MSIX_FLAGS              0x2
> -#define DLB_PCI_MSIX_FLAGS_ENABLE       0x8000
> -#define DLB_PCI_MSIX_FLAGS_MASKALL      0x4000
> -#define DLB_PCI_ERR_ROOT_STATUS         0x30
> -#define DLB_PCI_ERR_COR_STATUS          0x10
> -#define DLB_PCI_ERR_UNCOR_STATUS        0x4
> -#define DLB_PCI_COMMAND_INTX_DISABLE    0x400
> -#define DLB_PCI_ACS_CAP                 0x4
> -#define DLB_PCI_ACS_CTRL                0x6
> -#define DLB_PCI_ACS_SV                  0x1
> -#define DLB_PCI_ACS_RR                  0x4
> -#define DLB_PCI_ACS_CR                  0x8
> -#define DLB_PCI_ACS_UF                  0x10
> -#define DLB_PCI_ACS_EC                  0x20
> -
> -static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
> -{
> -	uint8_t pos;
> -	int ret;
> -	uint16_t hdr;
> -
> -	ret = rte_pci_read_config(pdev, &pos, 1, DLB_PCI_CAP_POINTER);
> -	pos &= 0xFC;
> -
> -	if (ret != 1)
> -		return -1;
> -
> -	while (pos > 0x3F) {
> -		ret = rte_pci_read_config(pdev, &hdr, 2, pos);
> -		if (ret != 2)
> -			return -1;
> -
> -		if (DLB_PCI_CAP_ID(hdr) == id)
> -			return pos;
> -
> -		if (DLB_PCI_CAP_ID(hdr) == 0xFF)
> -			return -1;
> -
> -		pos = DLB_PCI_CAP_NEXT(hdr);
> -	}
> -
> -	return -1;
> -}
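
(This is a standard walk of the legacy PCI capability list starting from the
pointer at config offset 0x34; a typical use, mirroring dlb_pf_reset() below:)

    int msix_off = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);

    if (msix_off < 0)
        return -ENODEV;    /* no MSI-X capability on this device */
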
> -
> -static int dlb_mask_ur_err(struct rte_pci_device *pdev)
> -{
> -	uint32_t mask;
> -	size_t sz = sizeof(mask);
> -	int pos = rte_pci_find_ext_capability(pdev, RTE_PCI_EXT_CAP_ID_ERR);
> -
> -	if (pos < 0) {
> -		DLB_LOG_ERR("[%s()] failed to find the aer capability\n",
> -		       __func__);
> -		return pos;
> -	}
> -
> -	pos += DLB_PCI_ERR_UNCOR_MASK;
> -
> -	if (rte_pci_read_config(pdev, &mask, sz, pos) != (int)sz) {
> -		DLB_LOG_ERR("[%s()] Failed to read uncorrectable error mask reg\n",
> -		       __func__);
> -		return -1;
> -	}
> -
> -	/* Mask Unsupported Request errors */
> -	mask |= DLB_PCI_ERR_UNC_UNSUP;
> -
> -	if (rte_pci_write_config(pdev, &mask, sz, pos) != (int)sz) {
> -		DLB_LOG_ERR("[%s()] Failed to write uncorrectable error mask reg at offset %d\n",
> -		       __func__, pos);
> -		return -1;
> -	}
> -
> -	return 0;
> -}
> -
> -struct dlb_dev *
> -dlb_probe(struct rte_pci_device *pdev)
> -{
> -	struct dlb_dev *dlb_dev;
> -	int ret = 0;
> -
> -	DLB_INFO(dlb_dev, "probe\n");
> -
> -	dlb_dev = rte_malloc("DLB_PF", sizeof(struct dlb_dev),
> -			     RTE_CACHE_LINE_SIZE);
> -
> -	if (dlb_dev == NULL) {
> -		ret = -ENOMEM;
> -		goto dlb_dev_malloc_fail;
> -	}
> -
> -	/* PCI Bus driver has already mapped bar space into process.
> -	 * Save off our IO register and FUNC addresses.
> -	 */
> -
> -	/* BAR 0 */
> -	if (pdev->mem_resource[0].addr == NULL) {
> -		DLB_ERR(dlb_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
> -		ret = -EINVAL;
> -		goto pci_mmap_bad_addr;
> -	}
> -	dlb_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
> -	dlb_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
> -
> -	DLB_INFO(dlb_dev, "DLB FUNC VA=%p, PA=%p, len=%"PRIu64"\n",
> -		 (void *)dlb_dev->hw.func_kva,
> -		 (void *)dlb_dev->hw.func_phys_addr,
> -		 pdev->mem_resource[0].len);
> -
> -	/* BAR 2 */
> -	if (pdev->mem_resource[2].addr == NULL) {
> -		DLB_ERR(dlb_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
> -		ret = -EINVAL;
> -		goto pci_mmap_bad_addr;
> -	}
> -	dlb_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
> -	dlb_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
> -
> -	DLB_INFO(dlb_dev, "DLB CSR VA=%p, PA=%p, len=%"PRIu64"\n",
> -		 (void *)dlb_dev->hw.csr_kva,
> -		 (void *)dlb_dev->hw.csr_phys_addr,
> -		 pdev->mem_resource[2].len);
> -
> -	dlb_dev->pdev = pdev;
> -
> -	ret = dlb_pf_reset(dlb_dev);
> -	if (ret)
> -		goto dlb_reset_fail;
> -
> -	/* DLB incorrectly sends URs in response to certain messages. Mask UR
> -	 * errors to prevent these from being propagated to the MCA.
> -	 */
> -	ret = dlb_mask_ur_err(pdev);
> -	if (ret)
> -		goto mask_ur_err_fail;
> -
> -	ret = dlb_pf_init_driver_state(dlb_dev);
> -	if (ret)
> -		goto init_driver_state_fail;
> -
> -	ret = dlb_resource_init(&dlb_dev->hw);
> -	if (ret)
> -		goto resource_init_fail;
> -
> -	dlb_dev->revision = os_get_dev_revision(&dlb_dev->hw);
> -
> -	dlb_pf_init_hardware(dlb_dev);
> -
> -	return dlb_dev;
> -
> -resource_init_fail:
> -	dlb_resource_free(&dlb_dev->hw);
> -init_driver_state_fail:
> -mask_ur_err_fail:
> -dlb_reset_fail:
> -pci_mmap_bad_addr:
> -	rte_free(dlb_dev);
> -dlb_dev_malloc_fail:
> -	rte_errno = ret;
> -	return NULL;
> -}
> -
> -int
> -dlb_pf_reset(struct dlb_dev *dlb_dev)
> -{
> -	int msix_cap_offset, err_cap_offset, acs_cap_offset, wait_count;
> -	uint16_t dev_ctl_word, dev_ctl2_word, lnk_word, lnk_word2;
> -	uint16_t rt_ctl_word, pri_ctrl_word;
> -	struct rte_pci_device *pdev = dlb_dev->pdev;
> -	uint16_t devsta_busy_word, devctl_word;
> -	int pcie_cap_offset, pri_cap_offset;
> -	uint16_t slt_word, slt_word2, cmd;
> -	int ret = 0, i = 0;
> -	uint32_t dword[16], pri_reqs_dword;
> -	off_t off;
> -
> -	/* Save PCI config state */
> -
> -	for (i = 0; i < 16; i++) {
> -		if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
> -			return ret;
> -	}
> -
> -	pcie_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_EXP);
> -
> -	if (pcie_cap_offset < 0) {
> -		DLB_LOG_ERR("[%s()] failed to find the pcie capability\n",
> -		       __func__);
> -		return pcie_cap_offset;
> -	}
> -
> -	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
> -	if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
> -		dev_ctl_word = 0;
> -
> -	off = pcie_cap_offset + DLB_PCI_LNKCTL;
> -	if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
> -		lnk_word = 0;
> -
> -	off = pcie_cap_offset + DLB_PCI_SLTCTL;
> -	if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
> -		slt_word = 0;
> -
> -	off = pcie_cap_offset + DLB_PCI_RTCTL;
> -	if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
> -		rt_ctl_word = 0;
> -
> -	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
> -	if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
> -		dev_ctl2_word = 0;
> -
> -	off = pcie_cap_offset + DLB_PCI_LNKCTL2;
> -	if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
> -		lnk_word2 = 0;
> -
> -	off = pcie_cap_offset + DLB_PCI_SLTCTL2;
> -	if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
> -		slt_word2 = 0;
> -
> -	pri_cap_offset = rte_pci_find_ext_capability(pdev,
> -						     DLB_PCI_EXT_CAP_ID_PRI);
> -	if (pri_cap_offset >= 0) {
> -		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
> -		if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
> -			pri_reqs_dword = 0;
> -	}
> -
> -	/* clear the PCI command register before issuing the FLR */
> -
> -	off = DLB_PCI_CMD;
> -	cmd = 0;
> -	if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
> -		DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -		       __func__, (int)off);
> -		return -1;
> -	}
> -
> -	/* issue the FLR */
> -	for (wait_count = 0; wait_count < 4; wait_count++) {
> -		int sleep_time;
> -
> -		off = pcie_cap_offset + DLB_PCI_EXP_DEVSTA;
> -		ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
> -		if (ret != 2) {
> -			DLB_LOG_ERR("[%s()] failed to read the pci device status\n",
> -			       __func__);
> -			return ret;
> -		}
> -
> -		if (!(devsta_busy_word & DLB_PCI_EXP_DEVSTA_TRPND))
> -			break;
> -
> -		sleep_time = (1 << (wait_count)) * 100;
> -		rte_delay_ms(sleep_time);
> -	}
> -
> -	if (wait_count == 4) {
> -		DLB_LOG_ERR("[%s()] wait for pci pending transactions timed out\n",
> -		       __func__);
> -		return -1;
> -	}
> -
> -	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
> -	ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
> -	if (ret != 2) {
> -		DLB_LOG_ERR("[%s()] failed to read the pcie device control\n",
> -		       __func__);
> -		return ret;
> -	}
> -
> -	devctl_word |= DLB_PCI_EXP_DEVCTL_BCR_FLR;
> -
> -	if (rte_pci_write_config(pdev, &devctl_word, 2, off) != 2) {
> -		DLB_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
> -		       __func__, (int)off);
> -		return -1;
> -	}
> -
> -	rte_delay_ms(100);
> -
> -	/* Restore PCI config state */
> -
> -	if (pcie_cap_offset >= 0) {
> -		off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
> -		if (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = pcie_cap_offset + DLB_PCI_LNKCTL;
> -		if (rte_pci_write_config(pdev, &lnk_word, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = pcie_cap_offset + DLB_PCI_SLTCTL;
> -		if (rte_pci_write_config(pdev, &slt_word, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = pcie_cap_offset + DLB_PCI_RTCTL;
> -		if (rte_pci_write_config(pdev, &rt_ctl_word, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
> -		if (rte_pci_write_config(pdev, &dev_ctl2_word, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = pcie_cap_offset + DLB_PCI_LNKCTL2;
> -		if (rte_pci_write_config(pdev, &lnk_word2, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = pcie_cap_offset + DLB_PCI_SLTCTL2;
> -		if (rte_pci_write_config(pdev, &slt_word2, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -	}
> -
> -	if (pri_cap_offset >= 0) {
> -		pri_ctrl_word = DLB_PCI_PRI_CTRL_ENABLE;
> -
> -		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
> -		if (rte_pci_write_config(pdev, &pri_reqs_dword, 4, off) != 4) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = pri_cap_offset + DLB_PCI_PRI_CTRL;
> -		if (rte_pci_write_config(pdev, &pri_ctrl_word, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -	}
> -
> -	err_cap_offset = rte_pci_find_ext_capability(pdev,
> -						     RTE_PCI_EXT_CAP_ID_ERR);
> -	if (err_cap_offset >= 0) {
> -		uint32_t tmp;
> -
> -		off = err_cap_offset + DLB_PCI_ERR_ROOT_STATUS;
> -		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
> -			tmp = 0;
> -
> -		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = err_cap_offset + DLB_PCI_ERR_COR_STATUS;
> -		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
> -			tmp = 0;
> -
> -		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = err_cap_offset + DLB_PCI_ERR_UNCOR_STATUS;
> -		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
> -			tmp = 0;
> -
> -		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -	}
> -
> -	for (i = 16; i > 0; i--) {
> -		off = (i - 1) * 4;
> -		if (rte_pci_write_config(pdev, &dword[i - 1], 4, off) != 4) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -	}
> -
> -	off = DLB_PCI_CMD;
> -	if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
> -		cmd &= ~DLB_PCI_COMMAND_INTX_DISABLE;
> -		if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space\n",
> -			       __func__);
> -			return -1;
> -		}
> -	}
> -
> -	msix_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);
> -	if (msix_cap_offset >= 0) {
> -		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
> -		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
> -			cmd |= DLB_PCI_MSIX_FLAGS_ENABLE;
> -			cmd |= DLB_PCI_MSIX_FLAGS_MASKALL;
> -			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
> -				DLB_LOG_ERR("[%s()] failed to write msix flags\n",
> -				       __func__);
> -				return -1;
> -			}
> -		}
> -
> -		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
> -		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
> -			cmd &= ~DLB_PCI_MSIX_FLAGS_MASKALL;
> -			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
> -				DLB_LOG_ERR("[%s()] failed to write msix flags\n",
> -				       __func__);
> -				return -1;
> -			}
> -		}
> -	}
> -
> -	acs_cap_offset = rte_pci_find_ext_capability(pdev,
> -						     DLB_PCI_EXT_CAP_ID_ACS);
> -	if (acs_cap_offset >= 0) {
> -		uint16_t acs_cap, acs_ctrl, acs_mask;
> -		off = acs_cap_offset + DLB_PCI_ACS_CAP;
> -		if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
> -			acs_cap = 0;
> -
> -		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
> -		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
> -			acs_ctrl = 0;
> -
> -		acs_mask = DLB_PCI_ACS_SV | DLB_PCI_ACS_RR;
> -		acs_mask |= (DLB_PCI_ACS_CR | DLB_PCI_ACS_UF);
> -		acs_ctrl |= (acs_cap & acs_mask);
> -
> -		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -
> -		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
> -		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
> -			acs_ctrl = 0;
> -
> -		acs_mask = DLB_PCI_ACS_RR | DLB_PCI_ACS_CR | DLB_PCI_ACS_EC;
> -		acs_ctrl &= ~acs_mask;
> -
> -		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
> -		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
> -			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
> -			       __func__, (int)off);
> -			return -1;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -/*******************************/
> -/****** Driver management ******/
> -/*******************************/
> -
> -int
> -dlb_pf_init_driver_state(struct dlb_dev *dlb_dev)
> -{
> -	/* Initialize software state */
> -	rte_spinlock_init(&dlb_dev->resource_mutex);
> -	rte_spinlock_init(&dlb_dev->measurement_lock);
> -
> -	return 0;
> -}
> -
> -void
> -dlb_pf_init_hardware(struct dlb_dev *dlb_dev)
> -{
> -	dlb_disable_dp_vasr_feature(&dlb_dev->hw);
> -
> -	dlb_enable_excess_tokens_alarm(&dlb_dev->hw);
> -
> -	if (dlb_dev->revision >= DLB_REV_B0) {
> -		dlb_hw_enable_sparse_ldb_cq_mode(&dlb_dev->hw);
> -		dlb_hw_enable_sparse_dir_cq_mode(&dlb_dev->hw);
> -	}
> -
> -	if (dlb_dev->revision >= DLB_REV_B0) {
> -		dlb_hw_disable_pf_to_vf_isr_pend_err(&dlb_dev->hw);
> -		dlb_hw_disable_vf_to_pf_isr_pend_err(&dlb_dev->hw);
> -	}
> -}
> diff --git a/drivers/event/dlb/pf/dlb_main.h b/drivers/event/dlb/pf/dlb_main.h
> deleted file mode 100644
> index 22e215223..000000000
> --- a/drivers/event/dlb/pf/dlb_main.h
> +++ /dev/null
> @@ -1,47 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#ifndef __DLB_MAIN_H
> -#define __DLB_MAIN_H
> -
> -#include <rte_debug.h>
> -#include <rte_log.h>
> -#include <rte_spinlock.h>
> -#include <rte_pci.h>
> -#include <rte_bus_pci.h>
> -
> -#ifndef PAGE_SIZE
> -#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
> -#endif
> -
> -#include "base/dlb_hw_types.h"
> -#include "../dlb_user.h"
> -
> -#define DLB_DEFAULT_UNREGISTER_TIMEOUT_S 5
> -
> -struct dlb_dev {
> -	struct rte_pci_device *pdev;
> -	struct dlb_hw hw;
> -	/* struct list_head list; */
> -	struct device *dlb_device;
> -	bool domain_reset_failed;
> -	/* The resource mutex serializes access to driver data structures and
> -	 * hardware registers.
> -	 */
> -	rte_spinlock_t resource_mutex;
> -	rte_spinlock_t measurement_lock;
> -	bool worker_launched;
> -	u8 revision;
> -};
> -
> -struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);
> -void dlb_reset_done(struct dlb_dev *dlb_dev);
> -
> -/* pf_ops */
> -int dlb_pf_init_driver_state(struct dlb_dev *dev);
> -void dlb_pf_free_driver_state(struct dlb_dev *dev);
> -void dlb_pf_init_hardware(struct dlb_dev *dev);
> -int dlb_pf_reset(struct dlb_dev *dlb_dev);
> -
> -#endif /* __DLB_MAIN_H */
> diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
> deleted file mode 100644
> index 3aeef6f91..000000000
> --- a/drivers/event/dlb/pf/dlb_pf.c
> +++ /dev/null
> @@ -1,752 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2016-2020 Intel Corporation
> - */
> -
> -#include <stdint.h>
> -#include <stdbool.h>
> -#include <stdio.h>
> -#include <sys/mman.h>
> -#include <sys/fcntl.h>
> -#include <sys/time.h>
> -#include <errno.h>
> -#include <assert.h>
> -#include <unistd.h>
> -#include <string.h>
> -#include <rte_debug.h>
> -#include <rte_log.h>
> -#include <rte_dev.h>
> -#include <rte_devargs.h>
> -#include <rte_mbuf.h>
> -#include <rte_ring.h>
> -#include <rte_errno.h>
> -#include <rte_kvargs.h>
> -#include <rte_malloc.h>
> -#include <rte_cycles.h>
> -#include <rte_io.h>
> -#include <rte_memory.h>
> -#include <rte_string_fns.h>
> -
> -#include "../dlb_priv.h"
> -#include "../dlb_iface.h"
> -#include "../dlb_inline_fns.h"
> -#include "dlb_main.h"
> -#include "base/dlb_hw_types.h"
> -#include "base/dlb_osdep.h"
> -#include "base/dlb_resource.h"
> -
> -static void
> -dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
> -{
> -	int i;
> -
> -	/* Addresses will be initialized at port create */
> -	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
> -		/* First directed ports */
> -
> -		/* producer port */
> -		dlb_port[i][DLB_DIR].pp_addr = NULL;
> -
> -		/* popcount */
> -		dlb_port[i][DLB_DIR].ldb_popcount = NULL;
> -		dlb_port[i][DLB_DIR].dir_popcount = NULL;
> -
> -		/* consumer queue */
> -		dlb_port[i][DLB_DIR].cq_base = NULL;
> -		dlb_port[i][DLB_DIR].mmaped = true;
> -
> -		/* Now load balanced ports */
> -
> -		/* producer port */
> -		dlb_port[i][DLB_LDB].pp_addr = NULL;
> -
> -		/* popcount */
> -		dlb_port[i][DLB_LDB].ldb_popcount = NULL;
> -		dlb_port[i][DLB_LDB].dir_popcount = NULL;
> -
> -		/* consumer queue */
> -		dlb_port[i][DLB_LDB].cq_base = NULL;
> -		dlb_port[i][DLB_LDB].mmaped = true;
> -	}
> -}
> -
> -static int
> -dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
> -{
> -	RTE_SET_USED(handle);
> -	RTE_SET_USED(name);
> -
> -	return 0;
> -}
> -
> -static void
> -dlb_pf_domain_close(struct dlb_eventdev *dlb)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
> -	int ret;
> -
> -	ret = dlb_reset_domain(&dlb_dev->hw, dlb->qm_instance.domain_id);
> -	if (ret)
> -		DLB_LOG_ERR("dlb_pf_reset_domain err %d", ret);
> -}
> -
> -static int
> -dlb_pf_get_device_version(struct dlb_hw_dev *handle,
> -			  uint8_t *revision)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -
> -	*revision = dlb_dev->revision;
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
> -			 struct dlb_get_num_resources_args *rsrcs)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -
> -	dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs);
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
> -			   struct dlb_create_sched_domain_args *arg)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	if (dlb_dev->domain_reset_failed) {
> -		response.status = DLB_ST_DOMAIN_RESET_FAILED;
> -		ret = -EINVAL;
> -		goto done;
> -	}
> -
> -	ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response);
> -	if (ret)
> -		goto done;
> -
> -done:
> -
> -	*(struct dlb_cmd_response *)arg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
> -			      struct dlb_create_ldb_pool_args *cfg)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
> -				     handle->domain_id,
> -				     cfg,
> -				     &response);
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
> -			      struct dlb_create_dir_pool_args *cfg)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
> -				     handle->domain_id,
> -				     cfg,
> -				     &response);
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
> -			enum dlb_cq_poll_modes *mode)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -
> -	if (dlb_dev->revision >= DLB_REV_B0)
> -		*mode = DLB_CQ_POLL_MODE_SPARSE;
> -	else
> -		*mode = DLB_CQ_POLL_MODE_STD;
> -
> -	return 0;
> -}
> -
> -static int
> -dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
> -			struct dlb_create_ldb_queue_args *cfg)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
> -				      handle->domain_id,
> -				      cfg,
> -				      &response);
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
> -			struct dlb_create_dir_queue_args *cfg)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
> -				      handle->domain_id,
> -				      cfg,
> -				      &response);
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static void *
> -dlb_alloc_coherent_aligned(const struct rte_memzone **mz, rte_iova_t *phys,
> -			   size_t size, int align)
> -{
> -	char mz_name[RTE_MEMZONE_NAMESIZE];
> -	uint32_t core_id = rte_lcore_id();
> -	unsigned int socket_id;
> -
> -	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb_port_mem_%lx",
> -		 (unsigned long)rte_get_timer_cycles());
> -	if (core_id == (unsigned int)LCORE_ID_ANY)
> -		core_id = rte_get_main_lcore();
> -	socket_id = rte_lcore_to_socket_id(core_id);
> -	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
> -					 RTE_MEMZONE_IOVA_CONTIG, align);
> -	if (*mz == NULL) {
> -		DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes\n",
> -			    size);
> -		*phys = 0;
> -		return NULL;
> -	}
> -	*phys = (*mz)->iova;
> -	return (*mz)->addr;
> -}
> -
> -static int
> -dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
> -		       struct dlb_create_ldb_port_args *cfg,
> -		       enum dlb_cq_poll_modes poll_mode)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -	uint8_t *port_base;
> -	const struct rte_memzone *mz;
> -	int alloc_sz, qe_sz, cq_alloc_depth;
> -	rte_iova_t pp_dma_base;
> -	rte_iova_t pc_dma_base;
> -	rte_iova_t cq_dma_base;
> -	int is_dir = false;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	if (poll_mode == DLB_CQ_POLL_MODE_STD)
> -		qe_sz = sizeof(struct dlb_dequeue_qe);
> -	else
> -		qe_sz = RTE_CACHE_LINE_SIZE;
> -
> -	/* The hardware always uses a CQ depth of at least
> -	 * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
> -	 * perspective we support a depth as low as 1 for LDB ports.
> -	 */
> -	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);
> -
> -	/* Calculate the port memory required, including two cache lines for
> -	 * credit pop counts. Round up to the nearest cache line.
> -	 */
> -	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
> -	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
> -
> -	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
> -					       alloc_sz, PAGE_SIZE);
> -	if (port_base == NULL)
> -		return -ENOMEM;
> -
> -	/* Lock the page in memory */
> -	ret = rte_mem_lock_page(port_base);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
> -		goto create_port_err;
> -	}
> -
> -	memset(port_base, 0, alloc_sz);
> -	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
> -
> -	ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
> -				     handle->domain_id,
> -				     cfg,
> -				     pc_dma_base,
> -				     cq_dma_base,
> -				     &response);
> -	if (ret)
> -		goto create_port_err;
> -
> -	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
> -	dlb_port[response.id][DLB_LDB].pp_addr =
> -		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
> -
> -	dlb_port[response.id][DLB_LDB].cq_base =
> -		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
> -
> -	dlb_port[response.id][DLB_LDB].ldb_popcount =
> -		(void *)(uintptr_t)port_base;
> -	dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
> -		(port_base + RTE_CACHE_LINE_SIZE);
> -	dlb_port[response.id][DLB_LDB].mz = mz;
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -	return 0;
> -
> -create_port_err:
> -
> -	rte_memzone_free(mz);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
> -		       struct dlb_create_dir_port_args *cfg,
> -		       enum dlb_cq_poll_modes poll_mode)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -	uint8_t *port_base;
> -	const struct rte_memzone *mz;
> -	int alloc_sz, qe_sz;
> -	rte_iova_t pp_dma_base;
> -	rte_iova_t pc_dma_base;
> -	rte_iova_t cq_dma_base;
> -	int is_dir = true;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	if (poll_mode == DLB_CQ_POLL_MODE_STD)
> -		qe_sz = sizeof(struct dlb_dequeue_qe);
> -	else
> -		qe_sz = RTE_CACHE_LINE_SIZE;
> -
> -	/* Calculate the port memory required, including two cache lines for
> -	 * credit pop counts. Round up to the nearest cache line.
> -	 */
> -	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
> -	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
> -
> -	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
> -					       alloc_sz, PAGE_SIZE);
> -	if (port_base == NULL)
> -		return -ENOMEM;
> -
> -	/* Lock the page in memory */
> -	ret = rte_mem_lock_page(port_base);
> -	if (ret < 0) {
> -		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
> -		goto create_port_err;
> -	}
> -
> -	memset(port_base, 0, alloc_sz);
> -	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
> -
> -	ret = dlb_hw_create_dir_port(&dlb_dev->hw,
> -				     handle->domain_id,
> -				     cfg,
> -				     pc_dma_base,
> -				     cq_dma_base,
> -				     &response);
> -	if (ret)
> -		goto create_port_err;
> -
> -	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
> -	dlb_port[response.id][DLB_DIR].pp_addr =
> -		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
> -
> -	dlb_port[response.id][DLB_DIR].cq_base =
> -		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
> -
> -	dlb_port[response.id][DLB_DIR].ldb_popcount =
> -		(void *)(uintptr_t)port_base;
> -	dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
> -		(port_base + RTE_CACHE_LINE_SIZE);
> -	dlb_port[response.id][DLB_DIR].mz = mz;
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -	return 0;
> -
> -create_port_err:
> -
> -	rte_memzone_free(mz);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
> -			 struct dlb_get_sn_allocation_args *args)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);
> -
> -	response.id = ret;
> -	response.status = 0;
> -
> -	*(struct dlb_cmd_response *)args->response = response;
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
> -			 struct dlb_set_sn_allocation_args *args)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
> -					     args->num);
> -
> -	response.status = 0;
> -
> -	*(struct dlb_cmd_response *)args->response = response;
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
> -			struct dlb_get_sn_occupancy_args *args)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
> -						      args->group);
> -
> -	response.id = ret;
> -	response.status = 0;
> -
> -	*(struct dlb_cmd_response *)args->response = response;
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,
> -			  struct dlb_start_domain_args *cfg)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_start_domain(&dlb_dev->hw,
> -				  handle->domain_id,
> -				  cfg,
> -				  &response);
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
> -			   struct dlb_pending_port_unmaps_args *args)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_pending_port_unmaps(&dlb_dev->hw,
> -					 handle->domain_id,
> -					 args,
> -					 &response);
> -
> -	*(struct dlb_cmd_response *)args->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_map_qid(struct dlb_hw_dev *handle,
> -	       struct dlb_map_qid_args *cfg)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_map_qid(&dlb_dev->hw,
> -			     handle->domain_id,
> -			     cfg,
> -			     &response);
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_unmap_qid(struct dlb_hw_dev *handle,
> -		 struct dlb_unmap_qid_args *cfg)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_unmap_qid(&dlb_dev->hw,
> -			       handle->domain_id,
> -			       cfg,
> -			       &response);
> -
> -	*(struct dlb_cmd_response *)cfg->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_get_ldb_queue_depth(struct dlb_hw_dev *handle,
> -			   struct dlb_get_ldb_queue_depth_args *args)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_get_ldb_queue_depth(&dlb_dev->hw,
> -					 handle->domain_id,
> -					 args,
> -					 &response);
> -
> -	*(struct dlb_cmd_response *)args->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static int
> -dlb_pf_get_dir_queue_depth(struct dlb_hw_dev *handle,
> -			   struct dlb_get_dir_queue_depth_args *args)
> -{
> -	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> -	struct dlb_cmd_response response = {0};
> -	int ret = 0;
> -
> -	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> -
> -	ret = dlb_hw_get_dir_queue_depth(&dlb_dev->hw,
> -					 handle->domain_id,
> -					 args,
> -					 &response);
> -
> -	*(struct dlb_cmd_response *)args->response = response;
> -
> -	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> -
> -	return ret;
> -}
> -
> -static void
> -dlb_pf_iface_fn_ptrs_init(void)
> -{
> -	dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
> -	dlb_iface_open = dlb_pf_open;
> -	dlb_iface_domain_close = dlb_pf_domain_close;
> -	dlb_iface_get_device_version = dlb_pf_get_device_version;
> -	dlb_iface_get_num_resources = dlb_pf_get_num_resources;
> -	dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
> -	dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
> -	dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
> -	dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
> -	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
> -	dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
> -	dlb_iface_dir_port_create = dlb_pf_dir_port_create;
> -	dlb_iface_map_qid = dlb_pf_map_qid;
> -	dlb_iface_unmap_qid = dlb_pf_unmap_qid;
> -	dlb_iface_sched_domain_start = dlb_pf_sched_domain_start;
> -	dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
> -	dlb_iface_get_ldb_queue_depth = dlb_pf_get_ldb_queue_depth;
> -	dlb_iface_get_dir_queue_depth = dlb_pf_get_dir_queue_depth;
> -	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
> -	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
> -	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
> -	dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
> -
> -}
> -
> -/* PCI DEV HOOKS */
> -static int
> -dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
> -{
> -	int ret = 0;
> -	struct rte_pci_device *pci_dev;
> -	struct dlb_devargs dlb_args = {
> -		.socket_id = rte_socket_id(),
> -		.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
> -		.num_dir_credits_override = -1,
> -		.defer_sched = 0,
> -		.num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
> -	};
> -	struct dlb_eventdev *dlb;
> -
> -	DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
> -		    eventdev->data->dev_id, eventdev->data->socket_id);
> -
> -	dlb_entry_points_init(eventdev);
> -
> -	dlb_pf_iface_fn_ptrs_init();
> -
> -	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
> -
> -	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> -		dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
> -
> -		/* Probe the DLB PF layer */
> -		dlb->qm_instance.pf_dev = dlb_probe(pci_dev);
> -
> -		if (dlb->qm_instance.pf_dev == NULL) {
> -			DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
> -				    rte_errno);
> -			ret = -rte_errno;
> -			goto dlb_probe_failed;
> -		}
> -
> -		/* Were we invoked with runtime parameters? */
> -		if (pci_dev->device.devargs) {
> -			ret = dlb_parse_params(pci_dev->device.devargs->args,
> -					       pci_dev->device.devargs->name,
> -					       &dlb_args);
> -			if (ret) {
> -				DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
> -					    ret, rte_errno);
> -				goto dlb_probe_failed;
> -			}
> -		}
> -
> -		ret = dlb_primary_eventdev_probe(eventdev,
> -						 EVDEV_DLB_NAME_PMD_STR,
> -						 &dlb_args);
> -	} else {
> -		ret = dlb_secondary_eventdev_probe(eventdev,
> -						   EVDEV_DLB_NAME_PMD_STR);
> -	}
> -	if (ret)
> -		goto dlb_probe_failed;
> -
> -	DLB_LOG_INFO("DLB PF Probe success\n");
> -
> -	return 0;
> -
> -dlb_probe_failed:
> -
> -	DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);
> -
> -	return ret;
> -}
> -
> -#define EVENTDEV_INTEL_VENDOR_ID 0x8086
> -
> -static const struct rte_pci_id pci_id_dlb_map[] = {
> -	{
> -		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
> -			       DLB_PF_DEV_ID)
> -	},
> -	{
> -		.vendor_id = 0,
> -	},
> -};
> -
> -static int
> -event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
> -		    struct rte_pci_device *pci_dev)
> -{
> -	return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
> -		sizeof(struct dlb_eventdev), dlb_eventdev_pci_init,
> -		EVDEV_DLB_NAME_PMD_STR);
> -}
> -
> -static int
> -event_dlb_pci_remove(struct rte_pci_device *pci_dev)
> -{
> -	return rte_event_pmd_pci_remove(pci_dev, NULL);
> -}
> -
> -static struct rte_pci_driver pci_eventdev_dlb_pmd = {
> -	.id_table = pci_id_dlb_map,
> -	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
> -	.probe = event_dlb_pci_probe,
> -	.remove = event_dlb_pci_remove,
> -};
> -
> -RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
> -RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);
> diff --git a/drivers/event/dlb/rte_pmd_dlb.c b/drivers/event/dlb/rte_pmd_dlb.c
> deleted file mode 100644
> index 8f56dc306..000000000
> --- a/drivers/event/dlb/rte_pmd_dlb.c
> +++ /dev/null
> @@ -1,38 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2020 Intel Corporation
> - */
> -
> -#include "rte_eventdev.h"
> -#include "eventdev_pmd.h"
> -#include "rte_pmd_dlb.h"
> -#include "dlb_priv.h"
> -#include "dlb_inline_fns.h"
> -
> -int
> -rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
> -			       uint8_t port_id,
> -			       enum dlb_token_pop_mode mode)
> -{
> -	struct dlb_eventdev *dlb;
> -	struct rte_eventdev *dev;
> -
> -	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> -	dev = &rte_eventdevs[dev_id];
> -
> -	dlb = dlb_pmd_priv(dev);
> -
> -	if (mode >= NUM_TOKEN_POP_MODES)
> -		return -EINVAL;
> -
> -	/* The event device must be configured, but not yet started */
> -	if (!dlb->configured || dlb->run_state != DLB_RUN_STATE_STOPPED)
> -		return -EINVAL;
> -
> -	/* The token pop mode must be set before configuring the port */
> -	if (port_id >= dlb->num_ports || dlb->ev_ports[port_id].setup_done)
> -		return -EINVAL;
> -
> -	dlb->ev_ports[port_id].qm_port.token_pop_mode = mode;
> -
> -	return 0;
> -}
> diff --git a/drivers/event/dlb/rte_pmd_dlb.h b/drivers/event/dlb/rte_pmd_dlb.h
> deleted file mode 100644
> index 9cf6dd338..000000000
> --- a/drivers/event/dlb/rte_pmd_dlb.h
> +++ /dev/null
> @@ -1,77 +0,0 @@
> -/* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2019-2020 Intel Corporation
> - */
> -
> -/*!
> - *  @file      rte_pmd_dlb.h
> - *
> - *  @brief     DLB PMD-specific functions
> - *
> - */
> -
> -#ifndef _RTE_PMD_DLB_H_
> -#define _RTE_PMD_DLB_H_
> -
> -#ifdef __cplusplus
> -extern "C" {
> -#endif
> -
> -#include <stdint.h>
> -
> -/**
> - * @warning
> - * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> - *
> - * Selects the token pop mode for an DLB port.
> - */
> -enum dlb_token_pop_mode {
> -	/* Pop the CQ tokens immediately after dequeueing. */
> -	AUTO_POP,
> -	/* Pop CQ tokens after (dequeue_depth - 1) events are released.
> -	 * Supported on load-balanced ports only.
> -	 */
> -	DELAYED_POP,
> -	/* Pop the CQ tokens during next dequeue operation. */
> -	DEFERRED_POP,
> -
> -	/* NUM_TOKEN_POP_MODES must be last */
> -	NUM_TOKEN_POP_MODES
> -};
> -
> -/*!
> - * @warning
> - * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> - *
> - * Configure the token pop mode for an DLB port. By default, all ports use
> - * AUTO_POP. This function must be called before calling rte_event_port_setup()
> - * for the port, but after calling rte_event_dev_configure().
> - *
> - * @note
> - *    The defer_sched vdev arg, which configures all load-balanced ports with
> - *    dequeue_depth == 1 for DEFERRED_POP mode, takes precedence over this
> - *    function.
> - *
> - * @param dev_id
> - *    The identifier of the event device.
> - * @param port_id
> - *    The identifier of the event port.
> - * @param mode
> - *    The token pop mode.
> - *
> - * @return
> - * - 0: Success
> - * - EINVAL: Invalid dev_id, port_id, or mode
> - * - EINVAL: The DLB is not configured, is already running, or the port is
> - *   already setup
> - */
> -
> -__rte_experimental
> -int
> -rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
> -			       uint8_t port_id,
> -			       enum dlb_token_pop_mode mode);
> -#ifdef __cplusplus
> -}
> -#endif
> -
> -#endif /* _RTE_PMD_DLB_H_ */
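
For reference, a minimal usage sketch of the interface declared above,
assuming ``dev_id``, ``port_id``, and ``port_conf`` are provided by the
application: configure the device, select the pop mode, then set up the port.

    .. code-block:: c

       #include <rte_eventdev.h>
       #include <rte_pmd_dlb.h>

       static int
       setup_delayed_pop_port(uint8_t dev_id, uint8_t port_id,
                              const struct rte_event_port_conf *port_conf)
       {
               int ret;

               /* After rte_event_dev_configure(), before port setup */
               ret = rte_pmd_dlb_set_token_pop_mode(dev_id, port_id,
                                                    DELAYED_POP);
               if (ret)
                       return ret;

               return rte_event_port_setup(dev_id, port_id, port_conf);
       }
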
> diff --git a/drivers/event/dlb/version.map b/drivers/event/dlb/version.map
> deleted file mode 100644
> index 3338a22c1..000000000
> --- a/drivers/event/dlb/version.map
> +++ /dev/null
> @@ -1,9 +0,0 @@
> -DPDK_21 {
> -	local: *;
> -};
> -
> -EXPERIMENTAL {
> -	global:
> -
> -	rte_pmd_dlb_set_token_pop_mode;
> -};
> diff --git a/drivers/event/meson.build b/drivers/event/meson.build
> index a49288a5d..b7f9bf7c6 100644
> --- a/drivers/event/meson.build
> +++ b/drivers/event/meson.build
> @@ -5,7 +5,7 @@ if is_windows
>  	subdir_done()
>  endif
>
> -drivers = ['dlb', 'dlb2', 'dpaa', 'dpaa2', 'octeontx2', 'opdl', 'skeleton', 'sw',
> +drivers = ['dlb2', 'dpaa', 'dpaa2', 'octeontx2', 'opdl', 'skeleton', 'sw',
>  	   'dsw']
>  if not (toolchain == 'gcc' and cc.version().version_compare('<4.8.6') and
>  	dpdk_conf.has('RTE_ARCH_ARM64'))
> --
> 2.23.0
>
>
  

Patch

diff --git a/config/rte_config.h b/config/rte_config.h
index 55a2fc50e..aedb68c42 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -138,12 +138,6 @@ 
 /* QEDE PMD defines */
 #define RTE_LIBRTE_QEDE_FW ""
 
-/* DLB PMD defines */
-#define RTE_LIBRTE_PMD_DLB_POLL_INTERVAL 1000
-#define RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE  0
-#undef RTE_LIBRTE_PMD_DLB_QUELL_STATS
-#define RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA 32
-
 /* DLB2 defines */
 #define RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL 1000
 #define RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE  0
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 748514e24..38376149c 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -55,7 +55,6 @@  The public API headers are grouped by topics:
   [dpaa2_cmdif]        (@ref rte_pmd_dpaa2_cmdif.h),
   [dpaa2_qdma]         (@ref rte_pmd_dpaa2_qdma.h),
   [crypto_scheduler]   (@ref rte_cryptodev_scheduler.h),
-  [dlb]                (@ref rte_pmd_dlb.h),
   [dlb2]               (@ref rte_pmd_dlb2.h)
 
 - **memory**:
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 5c883b613..49d1c3ac4 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -7,7 +7,6 @@  USE_MDFILE_AS_MAINPAGE  = @TOPDIR@/doc/api/doxy-api-index.md
 INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/event/dlb \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/mempool/dpaa2 \
                           @TOPDIR@/drivers/net/ark \
diff --git a/doc/guides/eventdevs/dlb.rst b/doc/guides/eventdevs/dlb.rst
deleted file mode 100644
index d44afcdcf..000000000
--- a/doc/guides/eventdevs/dlb.rst
+++ /dev/null
@@ -1,341 +0,0 @@ 
-..  SPDX-License-Identifier: BSD-3-Clause
-    Copyright(c) 2020 Intel Corporation.
-
-Driver for the Intel® Dynamic Load Balancer (DLB)
-=================================================
-
-The DPDK dlb poll mode driver supports the Intel® Dynamic Load Balancer.
-
-Prerequisites
--------------
-
-Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup
-the basic DPDK environment.
-
-Configuration
--------------
-
-The DLB PF PMD is a user-space PMD that uses VFIO to gain direct
-device access. To use this operation mode, the PCIe PF device must be bound
-to a DPDK-compatible VFIO driver, such as vfio-pci.
-
-Eventdev API Notes
-------------------
-
-The DLB provides the functions of a DPDK event device; specifically, it
-supports atomic, ordered, and parallel scheduling events from queues to ports.
-However, the DLB hardware is not a perfect match to the eventdev API. Some DLB
-features are abstracted by the PMD (e.g. directed ports), some are only
-accessible as vdev command-line parameters, and certain eventdev features are
-not supported (e.g. the event flow ID is not maintained during scheduling).
-
-In general the dlb PMD is designed for ease-of-use and does not require a
-detailed understanding of the hardware, but these details are important when
-writing high-performance code. This section describes the places where the
-eventdev API and DLB misalign.
-
-Scheduling Domain Configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are 32 scheduling domains in the DLB.
-When one is configured, it allocates load-balanced and
-directed queues, ports, credits, and other hardware resources. Some
-resource allocations are user-controlled -- the number of queues, for example
--- and others, like credit pools (one directed and one load-balanced pool per
-scheduling domain), are not.
-
-The DLB is a closed system eventdev, and as such the ``nb_events_limit`` device
-setup argument and the per-port ``new_event_threshold`` argument apply as
-defined in the eventdev header file. The limit is applied to all enqueues,
-regardless of whether it will consume a directed or load-balanced credit.
-
-Reconfiguration
-~~~~~~~~~~~~~~~
-
-The Eventdev API allows one to reconfigure a device, its ports, and its queues
-by first stopping the device, calling the configuration function(s), then
-restarting the device. The DLB does not support configuring an individual queue
-or port without first reconfiguring the entire device, however, so there are
-certain reconfiguration sequences that are valid in the eventdev API but not
-supported by the PMD.
-
-Specifically, the PMD supports the following configuration sequence:
-1. Configure and start the device
-2. Stop the device
-3. (Optional) Reconfigure the device
-4. (Optional) If step 3 is run:
-
-   a. Setup queue(s). The reconfigured queue(s) lose their previous port links.
-   b. The reconfigured port(s) lose their previous queue links.
-
-5. (Optional, only if steps 4a and 4b are run) Link port(s) to queue(s)
-6. Restart the device. If the device is reconfigured in step 3 but one or more
-   of its ports or queues are not, the PMD will apply their previous
-   configuration (including port->queue links) at this time.
-
-The PMD does not support the following configuration sequences:
-1. Configure and start the device
-2. Stop the device
-3. Setup queue or setup port
-4. Start the device
-
-This sequence is not supported because the event device must be reconfigured
-before its ports or queues can be.
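
A minimal sketch of the supported sequence expressed with the standard
eventdev calls; the helper name, IDs, and configuration structures are
placeholders supplied by the application:

    .. code-block:: c

       #include <rte_eventdev.h>

       /* Steps 2-6 of the supported sequence above. */
       static int
       reconfigure_dlb_eventdev(uint8_t dev_id, uint8_t queue_id,
                                uint8_t port_id,
                                const struct rte_event_dev_config *dev_conf,
                                const struct rte_event_queue_conf *queue_conf,
                                const struct rte_event_port_conf *port_conf)
       {
               int ret;

               rte_event_dev_stop(dev_id);                       /* step 2 */

               ret = rte_event_dev_configure(dev_id, dev_conf);  /* step 3 */
               if (ret < 0)
                       return ret;

               /* steps 4a/4b: re-setup queues/ports; old links are lost */
               ret = rte_event_queue_setup(dev_id, queue_id, queue_conf);
               if (ret < 0)
                       return ret;
               ret = rte_event_port_setup(dev_id, port_id, port_conf);
               if (ret < 0)
                       return ret;

               /* step 5: re-link the port to the queue */
               if (rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1) != 1)
                       return -1;

               return rte_event_dev_start(dev_id);               /* step 6 */
       }
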
-
-Load-Balanced Queues
-~~~~~~~~~~~~~~~~~~~~
-
-A load-balanced queue can support atomic and ordered scheduling, or atomic and
-unordered scheduling, but not atomic and unordered and ordered scheduling. A
-queue's scheduling types are controlled by the event queue configuration.
-
-If the user sets the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag, the
-``nb_atomic_order_sequences`` determines the supported scheduling types.
-With non-zero ``nb_atomic_order_sequences``, the queue is configured for atomic
-and ordered scheduling. In this case, ``RTE_SCHED_TYPE_PARALLEL`` scheduling is
-supported by scheduling those events as ordered events.  Note that when the
-event is dequeued, its sched_type will be ``RTE_SCHED_TYPE_ORDERED``. Else if
-``nb_atomic_order_sequences`` is zero, the queue is configured for atomic and
-unordered scheduling. In this case, ``RTE_SCHED_TYPE_ORDERED`` is unsupported.
-
-If the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag is not set, schedule_type
-dictates the queue's scheduling type.
-
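A minimal sketch of a queue set up for atomic and ordered scheduling via
``RTE_EVENT_QUEUE_CFG_ALL_TYPES``; the sequence-number and flow counts are
arbitrary examples:

    .. code-block:: c

       struct rte_event_queue_conf qconf = {
               .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
               /* non-zero: atomic + ordered (parallel events are scheduled
                * as ordered); zero would select atomic + unordered instead.
                */
               .nb_atomic_order_sequences = 64,
               .nb_atomic_flows = 1024, /* ignored by this PMD, see below */
               .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
       };

       int ret = rte_event_queue_setup(dev_id, queue_id, &qconf);
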
-The ``nb_atomic_order_sequences`` queue configuration field sets the ordered
-queue's reorder buffer size. The DLB has 4 groups of ordered queues, where
-each group is configured to contain 1 queue with 1024 reorder entries, 2
-queues with 512 reorder entries, and so on down to 32 queues with 32 entries.
-
-When a load-balanced queue is created, the PMD will configure a new sequence
-number group on-demand if num_sequence_numbers does not match a pre-existing
-group with available reorder buffer entries. If all sequence number groups are
-in use, no new group will be created and queue configuration will fail. (Note
-that when the PMD is used with a virtual DLB device, it cannot change the
-sequence number configuration.)
-
-The queue's ``nb_atomic_flows`` parameter is ignored by the DLB PMD, because
-the DLB does not limit the number of flows a queue can track. In the DLB, all
-load-balanced queues can use the full 16-bit flow ID range.
-
-Load-balanced and Directed Ports
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-DLB ports come in two flavors: load-balanced and directed. The eventdev API
-does not have the same concept, but it has a similar one: ports and queues that
-are singly-linked (i.e. linked to a single queue or port, respectively).
-
-The ``rte_event_dev_info_get()`` function reports the number of available
-event ports and queues (among other things). For the DLB PMD, max_event_ports
-and max_event_queues report the number of available load-balanced ports and
-queues, and max_single_link_event_port_queue_pairs reports the number of
-available directed ports and queues.
-
-When a scheduling domain is created in ``rte_event_dev_configure()``, the user
-specifies ``nb_event_ports`` and ``nb_single_link_event_port_queues``, which
-control the total number of ports (load-balanced and directed) and the number
-of directed ports. Hence, the number of requested load-balanced ports is
-``nb_event_ports - nb_single_link_event_ports``. The ``nb_event_queues`` field
-specifies the total number of queues (load-balanced and directed). The number
-of directed queues comes from ``nb_single_link_event_port_queues``, since
-directed ports and queues come in pairs.
-
-When a port is setup, the ``RTE_EVENT_PORT_CFG_SINGLE_LINK`` flag determines
-whether it should be configured as a directed (the flag is set) or a
-load-balanced (the flag is unset) port. Similarly, the
-``RTE_EVENT_QUEUE_CFG_SINGLE_LINK`` queue configuration flag controls
-whether it is a directed or load-balanced queue.
-
-Load-balanced ports can only be linked to load-balanced queues, and directed
-ports can only be linked to directed queues. Furthermore, directed ports can
-only be linked to a single directed queue (and vice versa), and that link
-cannot change after the eventdev is started.
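
A minimal sketch of the resource split described above: three load-balanced
ports plus one directed port/queue pair, with the single-link flags applied to
the directed pair (all counts and depths are placeholders):

    .. code-block:: c

       struct rte_event_dev_config dev_conf = {
               .nb_event_queues = 5,                  /* 4 load-balanced + 1 directed */
               .nb_event_ports = 4,                   /* 3 load-balanced + 1 directed */
               .nb_single_link_event_port_queues = 1, /* the directed pair */
               .nb_events_limit = 4096,
               .nb_event_queue_flows = 1024,
               .nb_event_port_dequeue_depth = 32,
               .nb_event_port_enqueue_depth = 32,
       };

       /* The directed queue and port are flagged as single-link */
       struct rte_event_queue_conf dir_qconf = {
               .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
       };
       struct rte_event_port_conf dir_pconf = {
               .new_event_threshold = 1024,
               .dequeue_depth = 32,
               .enqueue_depth = 32,
               .event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK,
       };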
-
-The eventdev API does not have a directed scheduling type. To support directed
-traffic, the dlb PMD detects when an event is being sent to a directed queue
-and overrides its scheduling type. Note that the originally selected scheduling
-type (atomic, ordered, or parallel) is not preserved, and an event's sched_type
-will be set to ``RTE_SCHED_TYPE_ATOMIC`` when it is dequeued from a directed
-port.
-
-Flow ID
-~~~~~~~
-
-The flow ID field is not preserved in the event when it is scheduled in the
-DLB, because the DLB hardware control word format does not have sufficient
-space to preserve every event field. As a result, the flow ID specified with
-the enqueued event will not be in the dequeued event. If this field is
-required, the application should pass it through an out-of-band path (for
-example in the mbuf's udata64 field, if the event points to an mbuf) or
-reconstruct the flow ID after receiving the event.
-
-Also, the DLB hardware control word supports a 16-bit flow ID. Since struct
-rte_event's flow_id field is 20 bits, the DLB PMD drops the most significant
-four bits from the event's flow ID.
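
A minimal sketch of one possible out-of-band path, assuming the event already
carries a pointer to application-owned metadata in ``event_ptr`` (``msg``,
``ev``, ``dev_id``, and ``port_id`` are placeholders in scope elsewhere):

    .. code-block:: c

       struct app_msg {
               uint32_t flow_id; /* carried by software, not by the DLB */
               /* ... application payload ... */
       };

       /* enqueue side */
       msg->flow_id = flow;
       ev.event_ptr = msg;
       (void)rte_event_enqueue_burst(dev_id, port_id, &ev, 1);

       /* dequeue side: read the flow ID from the metadata, not ev.flow_id */
       struct app_msg *rx = ev.event_ptr;
       uint32_t rx_flow = rx->flow_id;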
-
-Hardware Credits
-~~~~~~~~~~~~~~~~
-
-DLB uses a hardware credit scheme to prevent software from overflowing hardware
-event storage, with each unit of storage represented by a credit. A port spends
-a credit to enqueue an event, and hardware refills the ports with credits as the
-events are scheduled to ports. Refills come from credit pools, and each port is
-a member of a load-balanced credit pool and a directed credit pool. The
-load-balanced credits are used to enqueue to load-balanced queues, and directed
-credits are used for directed queues.
-
-A DLB eventdev contains one load-balanced and one directed credit pool. These
-pools' sizes are controlled by the nb_events_limit field in struct
-rte_event_dev_config. The load-balanced pool is sized to contain
-nb_events_limit credits, and the directed pool is sized to contain
-nb_events_limit/4 credits. The directed pool size can be overridden with the
-num_dir_credits vdev argument, like so:
-
-    .. code-block:: console
-
-       --vdev=dlb1_event,num_dir_credits=<value>
-
-This can be used if the default allocation is too low or too high for the
-specific application needs. The PMD also supports a vdev arg that limits the
-max_num_events reported by rte_event_dev_info_get():
-
-    .. code-block:: console
-
-       --vdev=dlb1_event,max_num_events=<value>
-
-By default, max_num_events is reported as the total available load-balanced
-credits. If multiple DLB-based applications are being used, it may be desirable
-to control how many load-balanced credits each application uses, particularly
-when application(s) are written to configure nb_events_limit equal to the
-reported max_num_events.
-
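A minimal sketch of sizing ``nb_events_limit`` from the reported
``max_num_events`` (which the vdev arg above may have capped):

    .. code-block:: c

       struct rte_event_dev_info info;
       struct rte_event_dev_config conf = {0};

       rte_event_dev_info_get(dev_id, &info);

       /* Use whatever load-balanced credit budget this eventdev reports */
       conf.nb_events_limit = info.max_num_events;
       /* ... fill in the remaining configuration fields ... */
       rte_event_dev_configure(dev_id, &conf);
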
-Each port is a member of both credit pools. A port's credit allocation is
-defined by its low watermark, high watermark, and refill quanta. These three
-parameters are calculated by the dlb PMD like so:
-
-- The load-balanced high watermark is set to the port's enqueue_depth.
-  The directed high watermark is set to the minimum of the enqueue_depth and
-  the directed pool size divided by the total number of ports.
-- The refill quanta is set to half the high watermark.
-- The low watermark is set to the minimum of 16 and the refill quanta.
-
-When the eventdev is started, each port is pre-allocated a high watermark's
-worth of credits. For example, if an eventdev contains four ports with enqueue
-depths of 32 and a load-balanced credit pool size of 4096, each port will start
-with 32 load-balanced credits, and there will be 3968 credits available to
-replenish the ports. Thus, a single port is not capable of enqueueing up to the
-nb_events_limit (without any events being dequeued), since the other ports are
-retaining their initial credit allocation; in short, all ports must enqueue in
-order to reach the limit.
-
-If a port attempts to enqueue and has no credits available, the enqueue
-operation will fail and the application must retry the enqueue. Credits are
-replenished asynchronously by the DLB hardware.
-
-Software Credits
-~~~~~~~~~~~~~~~~
-
-The DLB is a "closed system" event dev, and the DLB PMD layers a software
-credit scheme on top of the hardware credit scheme in order to comply with
-the per-port backpressure described in the eventdev API.
-
-The DLB's hardware scheme is local to a queue/pipeline stage: a port spends a
-credit when it enqueues to a queue, and credits are later replenished after the
-events are dequeued and released.
-
-In the software credit scheme, a credit is consumed when a new (.op =
-RTE_EVENT_OP_NEW) event is injected into the system, and the credit is
-replenished when the event is released from the system (either explicitly with
-RTE_EVENT_OP_RELEASE or implicitly in dequeue_burst()).
-
-In this model, an event is "in the system" from its first enqueue into eventdev
-until it is last dequeued. If the event goes through multiple event queues, it
-is still considered "in the system" while a worker thread is processing it.
-
-A port will fail to enqueue if the number of events in the system exceeds its
-``new_event_threshold`` (specified at port setup time). A port will also fail
-to enqueue if it lacks enough hardware credits to enqueue; load-balanced
-credits are used to enqueue to a load-balanced queue, and directed credits are
-used to enqueue to a directed queue.
-
-The out-of-credit situations are typically transient, and an eventdev
-application using the DLB ought to retry its enqueues if they fail.
-If enqueue fails, DLB PMD sets rte_errno as follows:
-
-- -ENOSPC: Credit exhaustion (either hardware or software)
-- -EINVAL: Invalid argument, such as port ID, queue ID, or sched_type.
-
-Depending on the pipeline the application has constructed, it's possible to
-enter a credit deadlock scenario wherein the worker thread lacks the credit
-to enqueue an event, and it must dequeue an event before it can recover the
-credit. If the worker thread retries its enqueue indefinitely, it will not
-make forward progress. Such deadlock is possible if the application has event
-"loops", in which an event in dequeued from queue A and later enqueued back to
-queue A.
-
-Due to this, workers should stop retrying after a time, release the events
-they are attempting to enqueue, and dequeue more events. It is important that
-the worker releases the events and doesn't simply set them aside to retry the
-enqueue again later, because the port has a limited history list size (by
-default, twice the port's dequeue_depth).
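
A minimal sketch of a worker loop that bounds its retries and then releases
the events it is holding rather than spinning forever; the burst size and
retry limit are arbitrary examples:

    .. code-block:: c

       #define ENQ_RETRY_LIMIT 1000

       struct rte_event evs[8];
       uint16_t i, n = rte_event_dequeue_burst(dev_id, port_id, evs, 8, 0);

       for (i = 0; i < n; i++) {
               unsigned int retries = 0;

               evs[i].op = RTE_EVENT_OP_FORWARD;
               /* ... set queue_id, sched_type, payload ... */

               while (rte_event_enqueue_burst(dev_id, port_id,
                                              &evs[i], 1) == 0) {
                       if (++retries < ENQ_RETRY_LIMIT)
                               continue;
                       /* Give up: release the event so its credit and
                        * history list entry are returned, then move on.
                        */
                       evs[i].op = RTE_EVENT_OP_RELEASE;
                       (void)rte_event_enqueue_burst(dev_id, port_id,
                                                     &evs[i], 1);
                       break;
               }
       }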
-
-Priority
-~~~~~~~~
-
-The DLB supports event priority and per-port queue service priority, as
-described in the eventdev header file. The DLB does not support 'global' event
-queue priority established at queue creation time.
-
-DLB supports 8 event and queue service priority levels. For both priority
-types, the PMD uses the upper three bits of the priority field to determine the
-DLB priority, discarding the 5 least significant bits. The 5 least significant
-event priority bits are not preserved when an event is enqueued.
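
For example, only the top three bits survive, so both of these events land on
the same DLB priority level:

    .. code-block:: c

       ev_a.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;      /* 0  -> level 0 */
       ev_b.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST + 31; /* 31 -> level 0 */
       /* effective level = priority >> 5, giving 8 levels (0..7) */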
-
-Atomic Inflights Allocation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In the last stage prior to scheduling an atomic event to a CQ, DLB holds the
-inflight event in a temporary buffer that is divided among load-balanced
-queues. If a queue's atomic buffer storage fills up, this can result in
-head-of-line-blocking. For example:
-
-- An LDB queue allocated N atomic buffer entries
-- All N entries are filled with events from flow X, which is pinned to CQ 0.
-
-Until CQ 0 releases 1+ events, no other atomic flows for that LDB queue can be
-scheduled. The likelihood of this case depends on the eventdev configuration,
-traffic behavior, event processing latency, potential for a worker to be
-interrupted or otherwise delayed, etc.
-
-By default, the PMD allocates 16 buffer entries for each load-balanced queue,
-which provides an even division across all 128 queues but potentially wastes
-buffer space (e.g. if not all queues are used, or aren't used for atomic
-scheduling).
-
-The PMD provides a dev arg to override the default per-queue allocation. To
-increase a vdev's per-queue atomic-inflight allocation to (for example) 64:
-
-    .. code-block:: console
-
-       --vdev=dlb1_event,atm_inflights=64
-
-Deferred Scheduling
-~~~~~~~~~~~~~~~~~~~
-
-The DLB PMD's default behavior for managing a CQ is to "pop" the CQ once per
-dequeued event before returning from rte_event_dequeue_burst(). This frees the
-corresponding entries in the CQ, which enables the DLB to schedule more events
-to it.
-
-To support applications seeking finer-grained scheduling control -- for example
-deferring scheduling to get the best possible priority scheduling and
-load-balancing -- the PMD supports a deferred scheduling mode. In this mode,
-the CQ entry is not popped until the *subsequent* rte_event_dequeue_burst()
-call. This mode only applies to load-balanced event ports with dequeue depth of
-1.
-
-To enable deferred scheduling, use the defer_sched vdev argument like so:
-
-    .. code-block:: console
-
-       --vdev=dlb1_event,defer_sched=on
-
diff --git a/doc/guides/eventdevs/index.rst b/doc/guides/eventdevs/index.rst
index f5b69b39d..738788d9e 100644
--- a/doc/guides/eventdevs/index.rst
+++ b/doc/guides/eventdevs/index.rst
@@ -11,7 +11,6 @@  application through the eventdev API.
     :maxdepth: 2
     :numbered:
 
-    dlb
     dlb2
     dpaa
     dpaa2
diff --git a/doc/guides/rel_notes/release_21_05.rst b/doc/guides/rel_notes/release_21_05.rst
index 5aa9ed7db..07d12a3af 100644
--- a/doc/guides/rel_notes/release_21_05.rst
+++ b/doc/guides/rel_notes/release_21_05.rst
@@ -68,6 +68,9 @@  Removed Items
    Also, make sure to start the actual text at the margin.
    =======================================================
 
+* Removed support for DLB V1 hardware.  This is not a broad market device,
+  and existing customers already obtain the source code directly from Intel.
+
 
 API Changes
 -----------
diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
deleted file mode 100644
index 8b26d1d2d..000000000
--- a/drivers/event/dlb/dlb.c
+++ /dev/null
@@ -1,4082 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <assert.h>
-#include <errno.h>
-#include <nmmintrin.h>
-#include <pthread.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/fcntl.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include <rte_common.h>
-#include <rte_config.h>
-#include <rte_cycles.h>
-#include <rte_debug.h>
-#include <rte_dev.h>
-#include <rte_errno.h>
-#include <rte_io.h>
-#include <rte_kvargs.h>
-#include <rte_log.h>
-#include <rte_malloc.h>
-#include <rte_mbuf.h>
-#include <rte_power_intrinsics.h>
-#include <rte_prefetch.h>
-#include <rte_ring.h>
-#include <rte_string_fns.h>
-
-#include <rte_eventdev.h>
-#include <eventdev_pmd.h>
-
-#include "dlb_priv.h"
-#include "dlb_iface.h"
-#include "dlb_inline_fns.h"
-
-/*
- * Resources exposed to eventdev.
- */
-#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
-#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
-#endif
-static struct rte_event_dev_info evdev_dlb_default_info = {
-	.driver_name = "", /* probe will set */
-	.min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
-	.max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
-#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
-	.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
-#else
-	.max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
-#endif
-	.max_event_queue_flows = DLB_MAX_NUM_FLOWS,
-	.max_event_queue_priority_levels = DLB_QID_PRIORITIES,
-	.max_event_priority_levels = DLB_QID_PRIORITIES,
-	.max_event_ports = DLB_MAX_NUM_LDB_PORTS,
-	.max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
-	.max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
-	.max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
-	.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
-	.max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
-	.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
-			  RTE_EVENT_DEV_CAP_EVENT_QOS |
-			  RTE_EVENT_DEV_CAP_BURST_MODE |
-			  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
-			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
-			  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
-};
-
-struct process_local_port_data
-dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
-
-static inline uint16_t
-dlb_event_enqueue_delayed(void *event_port,
-			  const struct rte_event events[]);
-
-static inline uint16_t
-dlb_event_enqueue_burst_delayed(void *event_port,
-				const struct rte_event events[],
-				uint16_t num);
-
-static inline uint16_t
-dlb_event_enqueue_new_burst_delayed(void *event_port,
-				    const struct rte_event events[],
-				    uint16_t num);
-
-static inline uint16_t
-dlb_event_enqueue_forward_burst_delayed(void *event_port,
-					const struct rte_event events[],
-					uint16_t num);
-
-static int
-dlb_hw_query_resources(struct dlb_eventdev *dlb)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_hw_resource_info *dlb_info = &handle->info;
-	int ret;
-
-	ret = dlb_iface_get_num_resources(handle,
-					  &dlb->hw_rsrc_query_results);
-	if (ret) {
-		DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
-		return ret;
-	}
-
-	/* Complete filling in device resource info returned to evdev app,
-	 * overriding any default values.
-	 * The capabilities (CAPs) were set at compile time.
-	 */
-
-	evdev_dlb_default_info.max_event_queues =
-		dlb->hw_rsrc_query_results.num_ldb_queues;
-
-	evdev_dlb_default_info.max_event_ports =
-		dlb->hw_rsrc_query_results.num_ldb_ports;
-
-	evdev_dlb_default_info.max_num_events =
-		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
-
-	/* Save off values used when creating the scheduling domain. */
-
-	handle->info.num_sched_domains =
-		dlb->hw_rsrc_query_results.num_sched_domains;
-
-	handle->info.hw_rsrc_max.nb_events_limit =
-		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
-
-	handle->info.hw_rsrc_max.num_queues =
-		dlb->hw_rsrc_query_results.num_ldb_queues +
-		dlb->hw_rsrc_query_results.num_dir_ports;
-
-	handle->info.hw_rsrc_max.num_ldb_queues =
-		dlb->hw_rsrc_query_results.num_ldb_queues;
-
-	handle->info.hw_rsrc_max.num_ldb_ports =
-		dlb->hw_rsrc_query_results.num_ldb_ports;
-
-	handle->info.hw_rsrc_max.num_dir_ports =
-		dlb->hw_rsrc_query_results.num_dir_ports;
-
-	handle->info.hw_rsrc_max.reorder_window_size =
-		dlb->hw_rsrc_query_results.num_hist_list_entries;
-
-	rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
-
-	return 0;
-}
-
-static void
-dlb_free_qe_mem(struct dlb_port *qm_port)
-{
-	if (qm_port == NULL)
-		return;
-
-	rte_free(qm_port->qe4);
-	qm_port->qe4 = NULL;
-
-	rte_free(qm_port->consume_qe);
-	qm_port->consume_qe = NULL;
-
-	rte_memzone_free(dlb_port[qm_port->id][PORT_TYPE(qm_port)].mz);
-	dlb_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
-}
-
-static int
-dlb_init_consume_qe(struct dlb_port *qm_port, char *mz_name)
-{
-	struct dlb_cq_pop_qe *qe;
-
-	qe = rte_zmalloc(mz_name,
-			DLB_NUM_QES_PER_CACHE_LINE *
-				sizeof(struct dlb_cq_pop_qe),
-			RTE_CACHE_LINE_SIZE);
-
-	if (qe == NULL)	{
-		DLB_LOG_ERR("dlb: no memory for consume_qe\n");
-		return -ENOMEM;
-	}
-
-	qm_port->consume_qe = qe;
-
-	qe->qe_valid = 0;
-	qe->qe_frag = 0;
-	qe->qe_comp = 0;
-	qe->cq_token = 1;
-	/* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
-	 * and so on.
-	 */
-	qe->tokens = 0;	/* set at run time */
-	qe->meas_lat = 0;
-	qe->no_dec = 0;
-	/* Completion IDs are disabled */
-	qe->cmp_id = 0;
-
-	return 0;
-}
-
-static int
-dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
-{
-	int ret, sz;
-
-	sz = DLB_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb_enqueue_qe);
-
-	qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
-
-	if (qm_port->qe4 == NULL) {
-		DLB_LOG_ERR("dlb: no qe4 memory\n");
-		ret = -ENOMEM;
-		goto error_exit;
-	}
-
-	ret = dlb_init_consume_qe(qm_port, mz_name);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: dlb_init_consume_qe ret=%d\n", ret);
-		goto error_exit;
-	}
-
-	return 0;
-
-error_exit:
-
-	dlb_free_qe_mem(qm_port);
-
-	return ret;
-}
-
-/* Wrapper for string to int conversion. Substituted for atoi(...), which is
- * unsafe.
- */
-#define DLB_BASE_10 10
-
-static int
-dlb_string_to_int(int *result, const char *str)
-{
-	long ret;
-	char *endstr;
-
-	if (str == NULL || result == NULL)
-		return -EINVAL;
-
-	errno = 0;
-	ret = strtol(str, &endstr, DLB_BASE_10);
-	if (errno)
-		return -errno;
-
-	/* long int and int may be different width for some architectures */
-	if (ret < INT_MIN || ret > INT_MAX || endstr == str)
-		return -EINVAL;
-
-	*result = ret;
-	return 0;
-}
-
-static int
-set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
-{
-	int *socket_id = opaque;
-	int ret;
-
-	ret = dlb_string_to_int(socket_id, value);
-	if (ret < 0)
-		return ret;
-
-	if (*socket_id > RTE_MAX_NUMA_NODES)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int
-set_max_num_events(const char *key __rte_unused,
-		   const char *value,
-		   void *opaque)
-{
-	int *max_num_events = opaque;
-	int ret;
-
-	if (value == NULL || opaque == NULL) {
-		DLB_LOG_ERR("NULL pointer\n");
-		return -EINVAL;
-	}
-
-	ret = dlb_string_to_int(max_num_events, value);
-	if (ret < 0)
-		return ret;
-
-	if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
-		DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
-			    DLB_MAX_NUM_LDB_CREDITS);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int
-set_num_dir_credits(const char *key __rte_unused,
-		    const char *value,
-		    void *opaque)
-{
-	int *num_dir_credits = opaque;
-	int ret;
-
-	if (value == NULL || opaque == NULL) {
-		DLB_LOG_ERR("NULL pointer\n");
-		return -EINVAL;
-	}
-
-	ret = dlb_string_to_int(num_dir_credits, value);
-	if (ret < 0)
-		return ret;
-
-	if (*num_dir_credits < 0 ||
-	    *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
-		DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
-			    DLB_MAX_NUM_DIR_CREDITS);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-/* VDEV-only notes:
- * This function first unmaps all memory mappings and closes the
- * domain's file descriptor, which causes the driver to reset the
- * scheduling domain. Once that completes (when close() returns), we
- * can safely free the dynamically allocated memory used by the
- * scheduling domain.
- *
- * PF-only notes:
- * We will maintain a use count and use that to determine when
- * a reset is required.  In PF mode, we never mmap, or munmap
- * device memory,  and we own the entire physical PCI device.
- */
-
-static void
-dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	enum dlb_configuration_state config_state;
-	int i, j;
-
-	/* Close and reset the domain */
-	dlb_iface_domain_close(dlb);
-
-	/* Free all dynamically allocated port memory */
-	for (i = 0; i < dlb->num_ports; i++)
-		dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);
-
-	/* If reconfiguring, mark the device's queues and ports as "previously
-	 * configured." If the user does not reconfigure them, the PMD will
-	 * reapply their previous configuration when the device is started.
-	 */
-	config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;
-
-	for (i = 0; i < dlb->num_ports; i++) {
-		dlb->ev_ports[i].qm_port.config_state = config_state;
-		/* Reset setup_done so ports can be reconfigured */
-		dlb->ev_ports[i].setup_done = false;
-		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
-			dlb->ev_ports[i].link[j].mapped = false;
-	}
-
-	for (i = 0; i < dlb->num_queues; i++)
-		dlb->ev_queues[i].qm_queue.config_state = config_state;
-
-	for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
-		dlb->ev_queues[i].setup_done = false;
-
-	dlb->num_ports = 0;
-	dlb->num_ldb_ports = 0;
-	dlb->num_dir_ports = 0;
-	dlb->num_queues = 0;
-	dlb->num_ldb_queues = 0;
-	dlb->num_dir_queues = 0;
-	dlb->configured = false;
-}
-
-static int
-dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
-{
-	struct dlb_create_ldb_pool_args cfg;
-	struct dlb_cmd_response response;
-	int ret;
-
-	if (handle == NULL)
-		return -EINVAL;
-
-	if (!handle->cfg.resources.num_ldb_credits) {
-		handle->cfg.ldb_credit_pool_id = 0;
-		handle->cfg.num_ldb_credits = 0;
-		return 0;
-	}
-
-	cfg.response = (uintptr_t)&response;
-	cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;
-
-	ret = dlb_iface_ldb_credit_pool_create(handle,
-					       &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-	}
-
-	handle->cfg.ldb_credit_pool_id = response.id;
-	handle->cfg.num_ldb_credits = cfg.num_ldb_credits;
-
-	return ret;
-}
-
-static int
-dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
-{
-	struct dlb_create_dir_pool_args cfg;
-	struct dlb_cmd_response response;
-	int ret;
-
-	if (handle == NULL)
-		return -EINVAL;
-
-	if (!handle->cfg.resources.num_dir_credits) {
-		handle->cfg.dir_credit_pool_id = 0;
-		handle->cfg.num_dir_credits = 0;
-		return 0;
-	}
-
-	cfg.response = (uintptr_t)&response;
-	cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;
-
-	ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
-	if (ret < 0)
-		DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-
-	handle->cfg.dir_credit_pool_id = response.id;
-	handle->cfg.num_dir_credits = cfg.num_dir_credits;
-
-	return ret;
-}
-
-static int
-dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
-			   struct dlb_eventdev *dlb,
-			   const struct dlb_hw_rsrcs *resources_asked)
-{
-	int ret = 0;
-	struct dlb_create_sched_domain_args *config_params;
-	struct dlb_cmd_response response;
-
-	if (resources_asked == NULL) {
-		DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
-		ret = EINVAL;
-		goto error_exit;
-	}
-
-	/* Map generic qm resources to dlb resources */
-	config_params = &handle->cfg.resources;
-
-	config_params->response = (uintptr_t)&response;
-
-	/* DIR ports and queues */
-
-	config_params->num_dir_ports =
-		resources_asked->num_dir_ports;
-
-	config_params->num_dir_credits =
-		resources_asked->num_dir_credits;
-
-	/* LDB ports and queues */
-
-	config_params->num_ldb_queues =
-		resources_asked->num_ldb_queues;
-
-	config_params->num_ldb_ports =
-		resources_asked->num_ldb_ports;
-
-	config_params->num_ldb_credits =
-		resources_asked->num_ldb_credits;
-
-	config_params->num_atomic_inflights =
-		dlb->num_atm_inflights_per_queue *
-		config_params->num_ldb_queues;
-
-	config_params->num_hist_list_entries = config_params->num_ldb_ports *
-		DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
-
-	/* dlb limited to 1 credit pool per queue type */
-	config_params->num_ldb_credit_pools = 1;
-	config_params->num_dir_credit_pools = 1;
-
-	DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_cred_pools=%d, dir-credit_pools=%d\n",
-		    config_params->num_ldb_queues,
-		    config_params->num_ldb_ports,
-		    config_params->num_dir_ports,
-		    config_params->num_atomic_inflights,
-		    config_params->num_hist_list_entries,
-		    config_params->num_ldb_credits,
-		    config_params->num_dir_credits,
-		    config_params->num_ldb_credit_pools,
-		    config_params->num_dir_credit_pools);
-
-	/* Configure the QM */
-
-	ret = dlb_iface_sched_domain_create(handle, config_params);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
-			    handle->device_id,
-			    ret,
-			    dlb_error_strings[response.status]);
-		goto error_exit;
-	}
-
-	handle->domain_id = response.id;
-	handle->domain_id_valid = 1;
-
-	config_params->response = 0;
-
-	ret = dlb_ldb_credit_pool_create(handle);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
-		goto error_exit2;
-	}
-
-	ret = dlb_dir_credit_pool_create(handle);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: create dir credit pool failed\n");
-		goto error_exit2;
-	}
-
-	handle->cfg.configured = true;
-
-	return 0;
-
-error_exit2:
-	dlb_iface_domain_close(dlb);
-
-error_exit:
-	return ret;
-}
-
-/* End HW specific */
-static void
-dlb_eventdev_info_get(struct rte_eventdev *dev,
-		      struct rte_event_dev_info *dev_info)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	int ret;
-
-	ret = dlb_hw_query_resources(dlb);
-	if (ret) {
-		const struct rte_eventdev_data *data = dev->data;
-
-		DLB_LOG_ERR("get resources err=%d, devid=%d\n",
-			    ret, data->dev_id);
-		/* fn is void, so fall through and return values set up in
-		 * probe
-		 */
-	}
-
-	/* Add num resources currently owned by this domain.
-	 * These would become available if the scheduling domain were reset due
-	 * to the application recalling eventdev_configure to *reconfigure* the
-	 * domain.
-	 */
-	evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
-	evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
-	evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;
-
-	/* In DLB A-stepping hardware, applications are limited to 128
-	 * configured ports (load-balanced or directed). The reported number of
-	 * available ports must reflect this.
-	 */
-	if (dlb->revision < DLB_REV_B0) {
-		int used_ports;
-
-		used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
-			dlb->hw_rsrc_query_results.num_ldb_ports -
-			dlb->hw_rsrc_query_results.num_dir_ports;
-
-		evdev_dlb_default_info.max_event_ports =
-			RTE_MIN(evdev_dlb_default_info.max_event_ports,
-				128 - used_ports);
-	}
-
-	evdev_dlb_default_info.max_event_queues =
-		RTE_MIN(evdev_dlb_default_info.max_event_queues,
-			RTE_EVENT_MAX_QUEUES_PER_DEV);
-
-	evdev_dlb_default_info.max_num_events =
-		RTE_MIN(evdev_dlb_default_info.max_num_events,
-			dlb->max_num_events_override);
-
-	*dev_info = evdev_dlb_default_info;
-}
-
-/* Note: 1 QM instance per QM device, QM instance/device == event device */
-static int
-dlb_eventdev_configure(const struct rte_eventdev *dev)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
-	const struct rte_eventdev_data *data = dev->data;
-	const struct rte_event_dev_config *config = &data->dev_conf;
-	int ret;
-
-	/* If this eventdev is already configured, we must release the current
-	 * scheduling domain before attempting to configure a new one.
-	 */
-	if (dlb->configured) {
-		dlb_hw_reset_sched_domain(dev, true);
-
-		ret = dlb_hw_query_resources(dlb);
-		if (ret) {
-			DLB_LOG_ERR("get resources err=%d, devid=%d\n",
-				    ret, data->dev_id);
-			return ret;
-		}
-	}
-
-	if (config->nb_event_queues > rsrcs->num_queues) {
-		DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
-			    config->nb_event_queues,
-			    rsrcs->num_queues);
-		return -EINVAL;
-	}
-	if (config->nb_event_ports > (rsrcs->num_ldb_ports
-			+ rsrcs->num_dir_ports)) {
-		DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
-			    config->nb_event_ports,
-			    (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
-		return -EINVAL;
-	}
-	if (config->nb_events_limit > rsrcs->nb_events_limit) {
-		DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
-			    config->nb_events_limit,
-			    rsrcs->nb_events_limit);
-		return -EINVAL;
-	}
-
-	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
-		dlb->global_dequeue_wait = false;
-	else {
-		uint32_t timeout32;
-
-		dlb->global_dequeue_wait = true;
-
-		timeout32 = config->dequeue_timeout_ns;
-
-		dlb->global_dequeue_wait_ticks =
-			timeout32 * (rte_get_timer_hz() / 1E9);
-	}
-
-	/* Does this platform support umonitor/umwait? */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
-		if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
-		    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
-			DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE must be 0 or 1.\n",
-				    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
-			return -EINVAL;
-		}
-		dlb->umwait_allowed = true;
-	}
-
-	rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
-	rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
-	/* 1 dir queue per dir port */
-	rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
-
-	/* Scale down nb_events_limit by 4 for directed credits, since there
-	 * are 4x as many load-balanced credits.
-	 */
-	rsrcs->num_ldb_credits = 0;
-	rsrcs->num_dir_credits = 0;
-
-	if (rsrcs->num_ldb_queues)
-		rsrcs->num_ldb_credits = config->nb_events_limit;
-	if (rsrcs->num_dir_ports)
-		rsrcs->num_dir_credits = config->nb_events_limit / 4;
-	if (dlb->num_dir_credits_override != -1)
-		rsrcs->num_dir_credits = dlb->num_dir_credits_override;
-
-	if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
-		DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
-		return -ENODEV;
-	}
-
-	dlb->new_event_limit = config->nb_events_limit;
-	__atomic_store_n(&dlb->inflights, 0, __ATOMIC_SEQ_CST);
-
-	/* Save number of ports/queues for this event dev */
-	dlb->num_ports = config->nb_event_ports;
-	dlb->num_queues = config->nb_event_queues;
-	dlb->num_dir_ports = rsrcs->num_dir_ports;
-	dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
-	dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
-	dlb->num_dir_queues = dlb->num_dir_ports;
-	dlb->num_ldb_credits = rsrcs->num_ldb_credits;
-	dlb->num_dir_credits = rsrcs->num_dir_credits;
-
-	dlb->configured = true;
-
-	return 0;
-}
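
As a quick worked example of the credit split above (the values are hypothetical, not taken from this patch): with nb_events_limit = 2048 and both load-balanced queues and directed ports present, the PMD requests 2048 load-balanced credits and 2048 / 4 = 512 directed credits, unless num_dir_credits_override was set.

	uint32_t nb_events_limit = 2048;              /* hypothetical */
	uint32_t ldb_credits = nb_events_limit;       /* 2048 */
	uint32_t dir_credits = nb_events_limit / 4;   /* 512 */
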
-
-static int16_t
-dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
-			       uint32_t qm_port_id,
-			       uint16_t qm_qid)
-{
-	struct dlb_unmap_qid_args cfg;
-	struct dlb_cmd_response response;
-	int32_t ret;
-
-	if (handle == NULL)
-		return -EINVAL;
-
-	cfg.response = (uintptr_t)&response;
-	cfg.port_id = qm_port_id;
-	cfg.qid = qm_qid;
-
-	ret = dlb_iface_unmap_qid(handle, &cfg);
-	if (ret < 0)
-		DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-
-	return ret;
-}
-
-static int
-dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
-			   struct dlb_eventdev_port *ev_port,
-			   struct dlb_eventdev_queue *ev_queue)
-{
-	int ret, i;
-
-	/* Don't unlink until start time. */
-	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
-		return 0;
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		if (ev_port->link[i].valid &&
-		    ev_port->link[i].queue_id == ev_queue->id)
-			break; /* found */
-	}
-
-	/* This is expected with the eventdev API, which blindly attempts to
-	 * unmap all queues.
-	 */
-	if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
-			    ev_queue->qm_queue.id,
-			    ev_port->qm_port.id);
-		return 0;
-	}
-
-	ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
-					     ev_port->qm_port.id,
-					     ev_queue->qm_queue.id);
-	if (!ret)
-		ev_port->link[i].mapped = false;
-
-	return ret;
-}
-
-static int
-dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
-			 uint8_t queues[], uint16_t nb_unlinks)
-{
-	struct dlb_eventdev_port *ev_port = event_port;
-	struct dlb_eventdev *dlb;
-	int i;
-
-	RTE_SET_USED(dev);
-
-	if (!ev_port->setup_done) {
-		DLB_LOG_ERR("dlb: evport %d is not configured\n",
-			    ev_port->id);
-		rte_errno = -EINVAL;
-		return 0;
-	}
-
-	if (queues == NULL || nb_unlinks == 0) {
-		DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
-		return 0; /* Ignore and return success */
-	}
-
-	if (ev_port->qm_port.is_directed) {
-		DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
-			    ev_port->id);
-		rte_errno = 0;
-		return nb_unlinks; /* as if success */
-	}
-
-	dlb = ev_port->dlb;
-
-	for (i = 0; i < nb_unlinks; i++) {
-		struct dlb_eventdev_queue *ev_queue;
-		int ret, j;
-
-		if (queues[i] >= dlb->num_queues) {
-			DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
-			rte_errno = -EINVAL;
-			return i; /* return index of offending queue */
-		}
-
-		ev_queue = &dlb->ev_queues[queues[i]];
-
-		/* Does a link exist? */
-		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
-			if (ev_port->link[j].queue_id == queues[i] &&
-			    ev_port->link[j].valid)
-				break;
-
-		if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
-			continue;
-
-		ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
-		if (ret) {
-			DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
-				    ret, ev_port->id, queues[i]);
-			rte_errno = -ENOENT;
-			return i; /* return index of offending queue */
-		}
-
-		ev_port->link[j].valid = false;
-		ev_port->num_links--;
-		ev_queue->num_links--;
-	}
-
-	return nb_unlinks;
-}
-
-static int
-dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
-				      void *event_port)
-{
-	struct dlb_eventdev_port *ev_port = event_port;
-	struct dlb_eventdev *dlb;
-	struct dlb_hw_dev *handle;
-	struct dlb_pending_port_unmaps_args cfg;
-	struct dlb_cmd_response response;
-	int ret;
-
-	RTE_SET_USED(dev);
-
-	if (!ev_port->setup_done) {
-		DLB_LOG_ERR("dlb: evport %d is not configured\n",
-			    ev_port->id);
-		rte_errno = -EINVAL;
-		return 0;
-	}
-
-	cfg.port_id = ev_port->qm_port.id;
-	cfg.response = (uintptr_t)&response;
-	dlb = ev_port->dlb;
-	handle = &dlb->qm_instance;
-	ret = dlb_iface_pending_port_unmaps(handle, &cfg);
-
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return ret;
-	}
-
-	return response.id;
-}
-
-static void
-dlb_eventdev_port_default_conf_get(struct rte_eventdev *dev,
-				   uint8_t port_id,
-				   struct rte_event_port_conf *port_conf)
-{
-	RTE_SET_USED(port_id);
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-
-	port_conf->new_event_threshold = dlb->new_event_limit;
-	port_conf->dequeue_depth = 32;
-	port_conf->enqueue_depth = DLB_MAX_ENQUEUE_DEPTH;
-	port_conf->event_port_cfg = 0;
-}
-
-static void
-dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
-				    uint8_t queue_id,
-				    struct rte_event_queue_conf *queue_conf)
-{
-	RTE_SET_USED(dev);
-	RTE_SET_USED(queue_id);
-	queue_conf->nb_atomic_flows = 1024;
-	queue_conf->nb_atomic_order_sequences = 32;
-	queue_conf->event_queue_cfg = 0;
-	queue_conf->priority = 0;
-}
-
-static int
-dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
-		       struct dlb_eventdev_port *ev_port,
-		       uint32_t dequeue_depth,
-		       uint32_t cq_depth,
-		       uint32_t enqueue_depth,
-		       uint16_t rsvd_tokens,
-		       bool use_rsvd_token_scheme)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_create_ldb_port_args cfg = {0};
-	struct dlb_cmd_response response = {0};
-	int ret;
-	struct dlb_port *qm_port = NULL;
-	char mz_name[RTE_MEMZONE_NAMESIZE];
-	uint32_t qm_port_id;
-
-	if (handle == NULL)
-		return -EINVAL;
-
-	if (cq_depth < DLB_MIN_LDB_CQ_DEPTH) {
-		DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n",
-			DLB_MIN_LDB_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH);
-		return -EINVAL;
-	}
-
-	if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
-		DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
-			    DLB_MIN_ENQUEUE_DEPTH);
-		return -EINVAL;
-	}
-
-	rte_spinlock_lock(&handle->resource_lock);
-
-	cfg.response = (uintptr_t)&response;
-
-	/* We round up to the next power of 2 if necessary */
-	cfg.cq_depth = rte_align32pow2(cq_depth);
-	cfg.cq_depth_threshold = rsvd_tokens;
-
-	cfg.cq_history_list_size = DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
-
-	/* User controls the LDB high watermark via enqueue depth. The DIR high
-	 * watermark is equal, unless the directed credit pool is too small.
-	 */
-	cfg.ldb_credit_high_watermark = enqueue_depth;
-
-	/* If there are no directed ports, the kernel driver will ignore this
-	 * port's directed credit settings. Don't use enqueue_depth if it would
-	 * require more directed credits than are available.
-	 */
-	cfg.dir_credit_high_watermark =
-		RTE_MIN(enqueue_depth,
-			handle->cfg.num_dir_credits / dlb->num_ports);
-
-	cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
-	cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
-
-	cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
-	cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
-
-	/* Per QM values */
-
-	cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
-	cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
-
-	ret = dlb_iface_ldb_port_create(handle, &cfg, dlb->poll_mode);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: dlb_ldb_port_create error, ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		goto error_exit;
-	}
-
-	qm_port_id = response.id;
-
-	DLB_LOG_DBG("dlb: ev_port %d uses qm LB port %d <<<<<\n",
-		    ev_port->id, qm_port_id);
-
-	qm_port = &ev_port->qm_port;
-	qm_port->ev_port = ev_port; /* back ptr */
-	qm_port->dlb = dlb; /* back ptr */
-
-	/*
-	 * Allocate and init local qe struct(s).
-	 * Note: MOVDIR64B requires the enqueue QE (qe4) to be aligned.
-	 */
-
-	snprintf(mz_name, sizeof(mz_name), "ldb_port%d",
-		 ev_port->id);
-
-	ret = dlb_init_qe_mem(qm_port, mz_name);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
-		goto error_exit;
-	}
-
-	qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
-	qm_port->id = qm_port_id;
-
-	/* The credit window is one high water mark of QEs */
-	qm_port->ldb_pushcount_at_credit_expiry = 0;
-	qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
-	/* The credit window is one high water mark of QEs */
-	qm_port->dir_pushcount_at_credit_expiry = 0;
-	qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
-	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
-	 * the effective depth is smaller.
-	 */
-	qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
-	qm_port->cq_idx = 0;
-	qm_port->cq_idx_unmasked = 0;
-	if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
-		qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
-	else
-		qm_port->cq_depth_mask = qm_port->cq_depth - 1;
-
-	qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
-	/* starting value of gen bit - it toggles at wrap time */
-	qm_port->gen_bit = 1;
-
-	qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
-	qm_port->cq_rsvd_token_deficit = rsvd_tokens;
-	qm_port->int_armed = false;
-
-	/* Save off for later use in info and lookup APIs. */
-	qm_port->qid_mappings = &dlb->qm_ldb_to_ev_queue_id[0];
-
-	qm_port->dequeue_depth = dequeue_depth;
-
-	/* When using the reserved token scheme, token_pop_thresh is
-	 * initially 2 * dequeue_depth. Once the tokens are reserved,
-	 * the enqueue code re-assigns it to dequeue_depth.
-	 */
-	qm_port->token_pop_thresh = cq_depth;
-
-	/* When the deferred scheduling vdev arg is selected, use deferred pop
-	 * for all single-entry CQs.
-	 */
-	if (cfg.cq_depth == 1 || (cfg.cq_depth == 2 && use_rsvd_token_scheme)) {
-		if (dlb->defer_sched)
-			qm_port->token_pop_mode = DEFERRED_POP;
-	}
-
-	/* The default enqueue functions do not include delayed-pop support for
-	 * performance reasons.
-	 */
-	if (qm_port->token_pop_mode == DELAYED_POP) {
-		dlb->event_dev->enqueue = dlb_event_enqueue_delayed;
-		dlb->event_dev->enqueue_burst =
-			dlb_event_enqueue_burst_delayed;
-		dlb->event_dev->enqueue_new_burst =
-			dlb_event_enqueue_new_burst_delayed;
-		dlb->event_dev->enqueue_forward_burst =
-			dlb_event_enqueue_forward_burst_delayed;
-	}
-
-	qm_port->owed_tokens = 0;
-	qm_port->issued_releases = 0;
-
-	/* update state */
-	qm_port->state = PORT_STARTED; /* enabled at create time */
-	qm_port->config_state = DLB_CONFIGURED;
-
-	qm_port->dir_credits = cfg.dir_credit_high_watermark;
-	qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
-
-	DLB_LOG_DBG("dlb: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
-		    qm_port_id,
-		    cq_depth,
-		    qm_port->ldb_credits,
-		    qm_port->dir_credits);
-
-	rte_spinlock_unlock(&handle->resource_lock);
-
-	return 0;
-
-error_exit:
-	if (qm_port) {
-		dlb_free_qe_mem(qm_port);
-		qm_port->pp_mmio_base = 0;
-	}
-
-	rte_spinlock_unlock(&handle->resource_lock);
-
-	DLB_LOG_ERR("dlb: create ldb port failed!\n");
-
-	return ret;
-}
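
The CQ indexing fields set up above (cq_depth_mask, gen_bit_shift, gen_bit) implement a ring with a generation bit; a minimal sketch, assuming a power-of-two CQ depth and a free-running 16-bit unmasked index (the helper names here are illustrative, not part of the driver):

#include <stdint.h>

/* Ring offset of the next CQ entry to inspect. */
static inline uint16_t
cq_slot(uint16_t idx_unmasked, uint16_t depth_mask)
{
	return idx_unmasked & depth_mask;
}

/* Expected generation bit: starts at 1 for index 0 and flips on every
 * wrap, so an entry written one lap earlier is recognized as stale.
 */
static inline uint8_t
cq_expected_gen(uint16_t idx_unmasked, uint8_t gen_bit_shift)
{
	return (~(idx_unmasked >> gen_bit_shift)) & 0x1;
}

In sparse poll mode the code above builds the mask from cq_depth * 4 instead of cq_depth, so the same scheme applies with a wider index range.
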
-
-static int
-dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
-		       struct dlb_eventdev_port *ev_port,
-		       uint32_t dequeue_depth,
-		       uint32_t cq_depth,
-		       uint32_t enqueue_depth,
-		       uint16_t rsvd_tokens,
-		       bool use_rsvd_token_scheme)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_create_dir_port_args cfg = {0};
-	struct dlb_cmd_response response = {0};
-	int ret;
-	struct dlb_port *qm_port = NULL;
-	char mz_name[RTE_MEMZONE_NAMESIZE];
-	uint32_t qm_port_id;
-
-	if (dlb == NULL || handle == NULL)
-		return -EINVAL;
-
-	if (cq_depth < DLB_MIN_DIR_CQ_DEPTH) {
-		DLB_LOG_ERR("dlb: invalid cq_depth, must be at least %d\n",
-			    DLB_MIN_DIR_CQ_DEPTH);
-		return -EINVAL;
-	}
-
-	if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
-		DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
-			    DLB_MIN_ENQUEUE_DEPTH);
-		return -EINVAL;
-	}
-
-	rte_spinlock_lock(&handle->resource_lock);
-
-	/* Directed queues are configured at link time. */
-	cfg.queue_id = -1;
-
-	cfg.response = (uintptr_t)&response;
-
-	/* We round up to the next power of 2 if necessary */
-	cfg.cq_depth = rte_align32pow2(cq_depth);
-	cfg.cq_depth_threshold = rsvd_tokens;
-
-	/* User controls the LDB high watermark via enqueue depth. The DIR high
-	 * watermark is equal, unless the directed credit pool is too small.
-	 */
-	cfg.ldb_credit_high_watermark = enqueue_depth;
-
-	/* Don't use enqueue_depth if it would require more directed credits
-	 * than are available.
-	 */
-	cfg.dir_credit_high_watermark =
-		RTE_MIN(enqueue_depth,
-			handle->cfg.num_dir_credits / dlb->num_ports);
-
-	cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
-	cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
-
-	cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
-	cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
-
-	/* Per QM values */
-
-	cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
-	cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
-
-	ret = dlb_iface_dir_port_create(handle, &cfg, dlb->poll_mode);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: dlb_dir_port_create error, ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		goto error_exit;
-	}
-
-	qm_port_id = response.id;
-
-	DLB_LOG_DBG("dlb: ev_port %d uses qm DIR port %d <<<<<\n",
-		    ev_port->id, qm_port_id);
-
-	qm_port = &ev_port->qm_port;
-	qm_port->ev_port = ev_port; /* back ptr */
-	qm_port->dlb = dlb;  /* back ptr */
-
-	/*
-	 * Init local qe struct(s).
-	 * Note: MOVDIR64B requires the enqueue QE to be aligned.
-	 */
-
-	snprintf(mz_name, sizeof(mz_name), "dir_port%d",
-		 ev_port->id);
-
-	ret = dlb_init_qe_mem(qm_port, mz_name);
-
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
-		goto error_exit;
-	}
-
-	qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
-	qm_port->id = qm_port_id;
-
-	/* The credit window is one high water mark of QEs */
-	qm_port->ldb_pushcount_at_credit_expiry = 0;
-	qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
-	/* The credit window is one high water mark of QEs */
-	qm_port->dir_pushcount_at_credit_expiry = 0;
-	qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
-	qm_port->cq_depth = cfg.cq_depth;
-	qm_port->cq_idx = 0;
-	qm_port->cq_idx_unmasked = 0;
-	if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
-		qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
-	else
-		qm_port->cq_depth_mask = cfg.cq_depth - 1;
-
-	qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
-	/* starting value of gen bit - it toggles at wrap time */
-	qm_port->gen_bit = 1;
-
-	qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
-	qm_port->cq_rsvd_token_deficit = rsvd_tokens;
-	qm_port->int_armed = false;
-
-	/* Save off for later use in info and lookup APIs. */
-	qm_port->qid_mappings = &dlb->qm_dir_to_ev_queue_id[0];
-
-	qm_port->dequeue_depth = dequeue_depth;
-
-	/* Directed ports are auto-pop, by default. */
-	qm_port->token_pop_mode = AUTO_POP;
-	qm_port->owed_tokens = 0;
-	qm_port->issued_releases = 0;
-
-	/* update state */
-	qm_port->state = PORT_STARTED; /* enabled at create time */
-	qm_port->config_state = DLB_CONFIGURED;
-
-	qm_port->dir_credits = cfg.dir_credit_high_watermark;
-	qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
-
-	DLB_LOG_DBG("dlb: created dir port %d, depth = %d cr=%d,%d\n",
-		    qm_port_id,
-		    cq_depth,
-		    cfg.dir_credit_high_watermark,
-		    cfg.ldb_credit_high_watermark);
-
-	rte_spinlock_unlock(&handle->resource_lock);
-
-	return 0;
-
-error_exit:
-	if (qm_port) {
-		qm_port->pp_mmio_base = 0;
-		dlb_free_qe_mem(qm_port);
-	}
-
-	rte_spinlock_unlock(&handle->resource_lock);
-
-	DLB_LOG_ERR("dlb: create dir port failed!\n");
-
-	return ret;
-}
-
-static int32_t
-dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
-			struct dlb_queue *queue,
-			const struct rte_event_queue_conf *evq_conf)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_create_ldb_queue_args cfg;
-	struct dlb_cmd_response response;
-	int32_t ret;
-	uint32_t qm_qid;
-	int sched_type = -1;
-
-	if (evq_conf == NULL)
-		return -EINVAL;
-
-	if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
-		if (evq_conf->nb_atomic_order_sequences != 0)
-			sched_type = RTE_SCHED_TYPE_ORDERED;
-		else
-			sched_type = RTE_SCHED_TYPE_PARALLEL;
-	} else
-		sched_type = evq_conf->schedule_type;
-
-	cfg.response = (uintptr_t)&response;
-	cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
-	cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
-	cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
-
-	if (sched_type != RTE_SCHED_TYPE_ORDERED) {
-		cfg.num_sequence_numbers = 0;
-		cfg.num_qid_inflights = DLB_DEF_UNORDERED_QID_INFLIGHTS;
-	}
-
-	ret = dlb_iface_ldb_queue_create(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return -EINVAL;
-	}
-
-	qm_qid = response.id;
-
-	/* Save off queue config for debug, resource lookups, and reconfig */
-	queue->num_qid_inflights = cfg.num_qid_inflights;
-	queue->num_atm_inflights = cfg.num_atomic_inflights;
-
-	queue->sched_type = sched_type;
-	queue->config_state = DLB_CONFIGURED;
-
-	DLB_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
-		    qm_qid,
-		    cfg.num_atomic_inflights,
-		    cfg.num_sequence_numbers,
-		    cfg.num_qid_inflights);
-
-	return qm_qid;
-}
-
-static int32_t
-dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_get_sn_allocation_args cfg;
-	struct dlb_cmd_response response;
-	int ret;
-
-	cfg.group = group;
-	cfg.response = (uintptr_t)&response;
-
-	ret = dlb_iface_get_sn_allocation(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return ret;
-	}
-
-	return response.id;
-}
-
-static int
-dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_set_sn_allocation_args cfg;
-	struct dlb_cmd_response response;
-	int ret;
-
-	cfg.num = num;
-	cfg.group = group;
-	cfg.response = (uintptr_t)&response;
-
-	ret = dlb_iface_set_sn_allocation(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return ret;
-	}
-
-	return ret;
-}
-
-static int32_t
-dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_get_sn_occupancy_args cfg;
-	struct dlb_cmd_response response;
-	int ret;
-
-	cfg.group = group;
-	cfg.response = (uintptr_t)&response;
-
-	ret = dlb_iface_get_sn_occupancy(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return ret;
-	}
-
-	return response.id;
-}
-
-/* Query the current sequence number allocations and, if they conflict with the
- * requested LDB queue configuration, attempt to re-allocate sequence numbers.
- * This is best-effort; if it fails, the subsequent load-balanced queue
- * configuration will fail and return an error.
- */
-static void
-dlb_program_sn_allocation(struct dlb_eventdev *dlb,
-			  const struct rte_event_queue_conf *queue_conf)
-{
-	int grp_occupancy[DLB_NUM_SN_GROUPS];
-	int grp_alloc[DLB_NUM_SN_GROUPS];
-	int i, sequence_numbers;
-
-	sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
-
-	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
-		int total_slots;
-
-		grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
-		if (grp_alloc[i] < 0)
-			return;
-
-		total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];
-
-		grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
-		if (grp_occupancy[i] < 0)
-			return;
-
-		/* DLB has at least one available slot for the requested
-		 * sequence numbers, so no further configuration required.
-		 */
-		if (grp_alloc[i] == sequence_numbers &&
-		    grp_occupancy[i] < total_slots)
-			return;
-	}
-
-	/* None of the sequence number groups are configured for the requested
-	 * sequence numbers, so we have to reconfigure one of them. This is
-	 * only possible if a group is not in use.
-	 */
-	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
-		if (grp_occupancy[i] == 0)
-			break;
-	}
-
-	if (i == DLB_NUM_SN_GROUPS) {
-		DLB_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
-		       __func__, sequence_numbers);
-		return;
-	}
-
-	/* Attempt to configure slot i with the requested number of sequence
-	 * numbers. Ignore the return value -- if this fails, the error will be
-	 * caught during subsequent queue configuration.
-	 */
-	dlb_set_sn_allocation(dlb, i, sequence_numbers);
-}
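
The slot arithmetic above reduces to a simple rule: a sequence-number group configured for N sequence numbers per queue can serve its fixed budget divided by N queues. A minimal sketch (the helper and its parameters are illustrative; the real budget is the driver's DLB_MAX_LDB_SN_ALLOC constant):

/* Queue slots available in one SN group, for a given per-queue
 * sequence-number allocation. Illustrative only.
 */
static inline int
sn_group_slots(int group_budget, int sn_per_queue)
{
	return sn_per_queue ? group_budget / sn_per_queue : 0;
}
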
-
-static int
-dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
-			     struct dlb_eventdev_queue *ev_queue,
-			     const struct rte_event_queue_conf *queue_conf)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	int32_t qm_qid;
-
-	if (queue_conf->nb_atomic_order_sequences)
-		dlb_program_sn_allocation(dlb, queue_conf);
-
-	qm_qid = dlb_hw_create_ldb_queue(dlb,
-					 &ev_queue->qm_queue,
-					 queue_conf);
-	if (qm_qid < 0) {
-		DLB_LOG_ERR("Failed to create the load-balanced queue\n");
-
-		return qm_qid;
-	}
-
-	dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
-
-	ev_queue->qm_queue.id = qm_qid;
-
-	return 0;
-}
-
-static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
-{
-	int i, num = 0;
-
-	for (i = 0; i < dlb->num_queues; i++) {
-		if (dlb->ev_queues[i].setup_done &&
-		    dlb->ev_queues[i].qm_queue.is_directed)
-			num++;
-	}
-
-	return num;
-}
-
-static void
-dlb_queue_link_teardown(struct dlb_eventdev *dlb,
-			struct dlb_eventdev_queue *ev_queue)
-{
-	struct dlb_eventdev_port *ev_port;
-	int i, j;
-
-	for (i = 0; i < dlb->num_ports; i++) {
-		ev_port = &dlb->ev_ports[i];
-
-		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
-			if (!ev_port->link[j].valid ||
-			    ev_port->link[j].queue_id != ev_queue->id)
-				continue;
-
-			ev_port->link[j].valid = false;
-			ev_port->num_links--;
-		}
-	}
-
-	ev_queue->num_links = 0;
-}
-
-static int
-dlb_eventdev_queue_setup(struct rte_eventdev *dev,
-			 uint8_t ev_qid,
-			 const struct rte_event_queue_conf *queue_conf)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	struct dlb_eventdev_queue *ev_queue;
-	int ret;
-
-	if (queue_conf == NULL)
-		return -EINVAL;
-
-	if (ev_qid >= dlb->num_queues)
-		return -EINVAL;
-
-	ev_queue = &dlb->ev_queues[ev_qid];
-
-	ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
-		RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
-	ev_queue->id = ev_qid;
-	ev_queue->conf = *queue_conf;
-
-	if (!ev_queue->qm_queue.is_directed) {
-		ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
-	} else {
-		/* The directed queue isn't setup until link time, at which
-		 * point we know its directed port ID. Directed queue setup
-		 * will only fail if this queue is already setup or there are
-		 * no directed queues left to configure.
-		 */
-		ret = 0;
-
-		ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;
-
-		if (ev_queue->setup_done ||
-		    dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
-			ret = -EINVAL;
-	}
-
-	/* Tear down pre-existing port->queue links */
-	if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
-		dlb_queue_link_teardown(dlb, ev_queue);
-
-	if (!ret)
-		ev_queue->setup_done = true;
-
-	return ret;
-}
-
-static void
-dlb_port_link_teardown(struct dlb_eventdev *dlb,
-		       struct dlb_eventdev_port *ev_port)
-{
-	struct dlb_eventdev_queue *ev_queue;
-	int i;
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		if (!ev_port->link[i].valid)
-			continue;
-
-		ev_queue = &dlb->ev_queues[ev_port->link[i].queue_id];
-
-		ev_port->link[i].valid = false;
-		ev_port->num_links--;
-		ev_queue->num_links--;
-	}
-}
-
-static int
-dlb_eventdev_port_setup(struct rte_eventdev *dev,
-			uint8_t ev_port_id,
-			const struct rte_event_port_conf *port_conf)
-{
-	struct dlb_eventdev *dlb;
-	struct dlb_eventdev_port *ev_port;
-	bool use_rsvd_token_scheme;
-	uint32_t adj_cq_depth;
-	uint16_t rsvd_tokens;
-	int ret;
-
-	if (dev == NULL || port_conf == NULL) {
-		DLB_LOG_ERR("Null parameter\n");
-		return -EINVAL;
-	}
-
-	dlb = dlb_pmd_priv(dev);
-
-	if (ev_port_id >= DLB_MAX_NUM_PORTS)
-		return -EINVAL;
-
-	if (port_conf->dequeue_depth >
-		evdev_dlb_default_info.max_event_port_dequeue_depth ||
-	    port_conf->enqueue_depth >
-		evdev_dlb_default_info.max_event_port_enqueue_depth)
-		return -EINVAL;
-
-	ev_port = &dlb->ev_ports[ev_port_id];
-	/* configured? */
-	if (ev_port->setup_done) {
-		DLB_LOG_ERR("evport %d is already configured\n", ev_port_id);
-		return -EINVAL;
-	}
-
-	/* The reserved token interrupt arming scheme requires that one or more
-	 * CQ tokens be reserved by the PMD. This limits the amount of CQ space
-	 * usable by the DLB, so in order to give an *effective* CQ depth equal
-	 * to the user-requested value, we double CQ depth and reserve half of
-	 * its tokens. If the user requests the max CQ depth (256) then we
-	 * cannot double it, so we reserve one token and give an effective
-	 * depth of 255 entries.
-	 */
-	use_rsvd_token_scheme = true;
-	rsvd_tokens = 1;
-	adj_cq_depth = port_conf->dequeue_depth;
-
-	if (use_rsvd_token_scheme && adj_cq_depth < 256) {
-		rsvd_tokens = adj_cq_depth;
-		adj_cq_depth *= 2;
-	}
-
-	ev_port->qm_port.is_directed = port_conf->event_port_cfg &
-		RTE_EVENT_PORT_CFG_SINGLE_LINK;
-
-	if (!ev_port->qm_port.is_directed) {
-		ret = dlb_hw_create_ldb_port(dlb,
-					     ev_port,
-					     port_conf->dequeue_depth,
-					     adj_cq_depth,
-					     port_conf->enqueue_depth,
-					     rsvd_tokens,
-					     use_rsvd_token_scheme);
-		if (ret < 0) {
-			DLB_LOG_ERR("Failed to create the lB port ve portId=%d\n",
-				    ev_port_id);
-			return ret;
-		}
-	} else {
-		ret = dlb_hw_create_dir_port(dlb,
-					     ev_port,
-					     port_conf->dequeue_depth,
-					     adj_cq_depth,
-					     port_conf->enqueue_depth,
-					     rsvd_tokens,
-					     use_rsvd_token_scheme);
-		if (ret < 0) {
-			DLB_LOG_ERR("Failed to create the DIR port\n");
-			return ret;
-		}
-	}
-
-	/* Save off port config for reconfig */
-	dlb->ev_ports[ev_port_id].conf = *port_conf;
-
-	dlb->ev_ports[ev_port_id].id = ev_port_id;
-	dlb->ev_ports[ev_port_id].enq_configured = true;
-	dlb->ev_ports[ev_port_id].setup_done = true;
-	dlb->ev_ports[ev_port_id].inflight_max =
-		port_conf->new_event_threshold;
-	dlb->ev_ports[ev_port_id].implicit_release =
-		!(port_conf->event_port_cfg &
-		  RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
-	dlb->ev_ports[ev_port_id].outstanding_releases = 0;
-	dlb->ev_ports[ev_port_id].inflight_credits = 0;
-	dlb->ev_ports[ev_port_id].credit_update_quanta =
-		RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA;
-	dlb->ev_ports[ev_port_id].dlb = dlb; /* reverse link */
-
-	/* Tear down pre-existing port->queue links */
-	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
-		dlb_port_link_teardown(dlb, &dlb->ev_ports[ev_port_id]);
-
-	dev->data->ports[ev_port_id] = &dlb->ev_ports[ev_port_id];
-
-	return 0;
-}
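
To make the reserved-token adjustment above concrete, a sketch with hypothetical values: a requested dequeue depth of 128 becomes a 256-entry CQ with 128 reserved tokens (effective depth 128), while the maximum request of 256 cannot be doubled, so a single token is reserved and the effective depth is 255.

	uint32_t req_depth = 128;           /* hypothetical request */
	uint16_t rsvd = 1;
	uint32_t cq_depth = req_depth;

	if (cq_depth < 256) {               /* room to double */
		rsvd = cq_depth;            /* reserve half: 128 */
		cq_depth *= 2;              /* 256; effective depth = 128 */
	}
	/* req_depth == 256: rsvd stays 1, effective depth = 255 */
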
-
-static int
-dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	int ret, i;
-
-	/* If an event queue or port was previously configured, but hasn't been
-	 * reconfigured, reapply its original configuration.
-	 */
-	for (i = 0; i < dlb->num_queues; i++) {
-		struct dlb_eventdev_queue *ev_queue;
-
-		ev_queue = &dlb->ev_queues[i];
-
-		if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
-			continue;
-
-		ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
-		if (ret < 0) {
-			DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
-			return ret;
-		}
-	}
-
-	for (i = 0; i < dlb->num_ports; i++) {
-		struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
-
-		if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
-			continue;
-
-		ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
-		if (ret < 0) {
-			DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
-				    i);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int
-set_dev_id(const char *key __rte_unused,
-	   const char *value,
-	   void *opaque)
-{
-	int *dev_id = opaque;
-	int ret;
-
-	if (value == NULL || opaque == NULL) {
-		DLB_LOG_ERR("NULL pointer\n");
-		return -EINVAL;
-	}
-
-	ret = dlb_string_to_int(dev_id, value);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static int
-set_defer_sched(const char *key __rte_unused,
-		const char *value,
-		void *opaque)
-{
-	int *defer_sched = opaque;
-
-	if (value == NULL || opaque == NULL) {
-		DLB_LOG_ERR("NULL pointer\n");
-		return -EINVAL;
-	}
-
-	if (strncmp(value, "on", 2) != 0) {
-		DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
-			    value);
-		return -EINVAL;
-	}
-
-	*defer_sched = 1;
-
-	return 0;
-}
-
-static int
-set_num_atm_inflights(const char *key __rte_unused,
-		      const char *value,
-		      void *opaque)
-{
-	int *num_atm_inflights = opaque;
-	int ret;
-
-	if (value == NULL || opaque == NULL) {
-		DLB_LOG_ERR("NULL pointer\n");
-		return -EINVAL;
-	}
-
-	ret = dlb_string_to_int(num_atm_inflights, value);
-	if (ret < 0)
-		return ret;
-
-	if (*num_atm_inflights < 0 ||
-	    *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
-		DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
-			    DLB_MAX_NUM_ATM_INFLIGHTS);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int
-dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
-		       uint8_t queue_id,
-		       bool link_exists,
-		       int index)
-{
-	struct dlb_eventdev *dlb = ev_port->dlb;
-	struct dlb_eventdev_queue *ev_queue;
-	bool port_is_dir, queue_is_dir;
-
-	if (queue_id > dlb->num_queues) {
-		DLB_LOG_ERR("queue_id %d > num queues %d\n",
-			    queue_id, dlb->num_queues);
-		rte_errno = -EINVAL;
-		return -1;
-	}
-
-	ev_queue = &dlb->ev_queues[queue_id];
-
-	if (!ev_queue->setup_done &&
-	    ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED) {
-		DLB_LOG_ERR("setup not done and not previously configured\n");
-		rte_errno = -EINVAL;
-		return -1;
-	}
-
-	port_is_dir = ev_port->qm_port.is_directed;
-	queue_is_dir = ev_queue->qm_queue.is_directed;
-
-	if (port_is_dir != queue_is_dir) {
-		DLB_LOG_ERR("%s queue %u can't link to %s port %u\n",
-			    queue_is_dir ? "DIR" : "LDB", ev_queue->id,
-			    port_is_dir ? "DIR" : "LDB", ev_port->id);
-
-		rte_errno = -EINVAL;
-		return -1;
-	}
-
-	/* Check if there is space for the requested link */
-	if (!link_exists && index == -1) {
-		DLB_LOG_ERR("no space for new link\n");
-		rte_errno = -ENOSPC;
-		return -1;
-	}
-
-	/* Check if the directed port is already linked */
-	if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
-	    !link_exists) {
-		DLB_LOG_ERR("Can't link DIR port %d to >1 queues\n",
-			    ev_port->id);
-		rte_errno = -EINVAL;
-		return -1;
-	}
-
-	/* Check if the directed queue is already linked */
-	if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
-	    !link_exists) {
-		DLB_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
-			    ev_queue->id);
-		rte_errno = -EINVAL;
-		return -1;
-	}
-
-	return 0;
-}
-
-static int32_t
-dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_create_dir_queue_args cfg;
-	struct dlb_cmd_response response = {0};
-	int32_t ret;
-
-	cfg.response = (uintptr_t)&response;
-
-	/* The directed port is always configured before its queue */
-	cfg.port_id = qm_port_id;
-
-	ret = dlb_iface_dir_queue_create(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return -EINVAL;
-	}
-
-	return response.id;
-}
-
-static int
-dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
-			     struct dlb_eventdev_queue *ev_queue,
-			     struct dlb_eventdev_port *ev_port)
-{
-	int32_t qm_qid;
-
-	qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
-
-	if (qm_qid < 0) {
-		DLB_LOG_ERR("Failed to create the DIR queue\n");
-		return qm_qid;
-	}
-
-	dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
-
-	ev_queue->qm_queue.id = qm_qid;
-
-	return 0;
-}
-
-static int16_t
-dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
-			   uint32_t qm_port_id,
-			   uint16_t qm_qid,
-			   uint8_t priority)
-{
-	struct dlb_map_qid_args cfg;
-	struct dlb_cmd_response response;
-	int32_t ret;
-
-	if (handle == NULL)
-		return -EINVAL;
-
-	/* Build message */
-	cfg.response = (uintptr_t)&response;
-	cfg.port_id = qm_port_id;
-	cfg.qid = qm_qid;
-	cfg.priority = EV_TO_DLB_PRIO(priority);
-
-	ret = dlb_iface_map_qid(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: map qid error, ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		DLB_LOG_ERR("dlb: device_id=%d grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
-			    handle->device_id,
-			    handle->domain_id, cfg.port_id,
-			    cfg.qid,
-			    cfg.priority);
-	} else {
-		DLB_LOG_DBG("dlb: mapped queue %d to qm_port %d\n",
-			    qm_qid, qm_port_id);
-	}
-
-	return ret;
-}
-
-static int
-dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
-			 struct dlb_eventdev_port *ev_port,
-			 struct dlb_eventdev_queue *ev_queue,
-			 uint8_t priority)
-{
-	int first_avail = -1;
-	int ret, i;
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		if (ev_port->link[i].valid) {
-			if (ev_port->link[i].queue_id == ev_queue->id &&
-			    ev_port->link[i].priority == priority) {
-				if (ev_port->link[i].mapped)
-					return 0; /* already mapped */
-				first_avail = i;
-			}
-		} else {
-			if (first_avail == -1)
-				first_avail = i;
-		}
-	}
-	if (first_avail == -1) {
-		DLB_LOG_ERR("dlb: qm_port %d has no available QID slots.\n",
-			    ev_port->qm_port.id);
-		return -EINVAL;
-	}
-
-	ret = dlb_hw_map_ldb_qid_to_port(&dlb->qm_instance,
-					 ev_port->qm_port.id,
-					 ev_queue->qm_queue.id,
-					 priority);
-
-	if (!ret)
-		ev_port->link[first_avail].mapped = true;
-
-	return ret;
-}
-
-static int
-dlb_do_port_link(struct rte_eventdev *dev,
-		 struct dlb_eventdev_queue *ev_queue,
-		 struct dlb_eventdev_port *ev_port,
-		 uint8_t prio)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	int err;
-
-	/* Don't link until start time. */
-	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
-		return 0;
-
-	if (ev_queue->qm_queue.is_directed)
-		err = dlb_eventdev_dir_queue_setup(dlb, ev_queue, ev_port);
-	else
-		err = dlb_event_queue_join_ldb(dlb, ev_port, ev_queue, prio);
-
-	if (err) {
-		DLB_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
-			    ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
-			    ev_queue->id, ev_port->id);
-
-		rte_errno = err;
-		return -1;
-	}
-
-	return 0;
-}
-
-static int
-dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	int i;
-
-	/* Perform requested port->queue links */
-	for (i = 0; i < dlb->num_ports; i++) {
-		struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
-		int j;
-
-		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
-			struct dlb_eventdev_queue *ev_queue;
-			uint8_t prio, queue_id;
-
-			if (!ev_port->link[j].valid)
-				continue;
-
-			prio = ev_port->link[j].priority;
-			queue_id = ev_port->link[j].queue_id;
-
-			if (dlb_validate_port_link(ev_port, queue_id, true, j))
-				return -EINVAL;
-
-			ev_queue = &dlb->ev_queues[queue_id];
-
-			if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
-				return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-static int
-dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
-		       const uint8_t queues[], const uint8_t priorities[],
-		       uint16_t nb_links)
-
-{
-	struct dlb_eventdev_port *ev_port = event_port;
-	struct dlb_eventdev *dlb;
-	int i, j;
-
-	RTE_SET_USED(dev);
-
-	if (ev_port == NULL) {
-		DLB_LOG_ERR("dlb: evport not setup\n");
-		rte_errno = -EINVAL;
-		return 0;
-	}
-
-	if (!ev_port->setup_done &&
-	    ev_port->qm_port.config_state != DLB_PREV_CONFIGURED) {
-		DLB_LOG_ERR("dlb: evport not setup\n");
-		rte_errno = -EINVAL;
-		return 0;
-	}
-
-	/* Note: rte_event_port_link() ensures the PMD won't receive a NULL
-	 * queues pointer.
-	 */
-	if (nb_links == 0) {
-		DLB_LOG_DBG("dlb: nb_links is 0\n");
-		return 0; /* Ignore and return success */
-	}
-
-	dlb = ev_port->dlb;
-
-	DLB_LOG_DBG("Linking %u queues to %s port %d\n",
-		    nb_links,
-		    ev_port->qm_port.is_directed ? "DIR" : "LDB",
-		    ev_port->id);
-
-	for (i = 0; i < nb_links; i++) {
-		struct dlb_eventdev_queue *ev_queue;
-		uint8_t queue_id, prio;
-		bool found = false;
-		int index = -1;
-
-		queue_id = queues[i];
-		prio = priorities[i];
-
-		/* Check if the link already exists. */
-		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
-			if (ev_port->link[j].valid) {
-				if (ev_port->link[j].queue_id == queue_id) {
-					found = true;
-					index = j;
-					break;
-				}
-			} else {
-				if (index == -1)
-					index = j;
-			}
-
-		/* could not link */
-		if (index == -1)
-			break;
-
-		/* Check if already linked at the requested priority */
-		if (found && ev_port->link[j].priority == prio)
-			continue;
-
-		if (dlb_validate_port_link(ev_port, queue_id, found, index))
-			break; /* return index of offending queue */
-
-		ev_queue = &dlb->ev_queues[queue_id];
-
-		if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
-			break; /* return index of offending queue */
-
-		ev_queue->num_links++;
-
-		ev_port->link[index].queue_id = queue_id;
-		ev_port->link[index].priority = prio;
-		ev_port->link[index].valid = true;
-		/* If the link already existed, this was only a priority
-		 * change, so don't count it as a new link.
-		 */
-		if (!found)
-			ev_port->num_links++;
-	}
-	return i;
-}
-
-static int
-dlb_eventdev_start(struct rte_eventdev *dev)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_start_domain_args cfg;
-	struct dlb_cmd_response response;
-	int ret, i;
-
-	rte_spinlock_lock(&dlb->qm_instance.resource_lock);
-	if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
-		DLB_LOG_ERR("bad state %d for dev_start\n",
-			    (int)dlb->run_state);
-		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
-		return -EINVAL;
-	}
-	dlb->run_state	= DLB_RUN_STATE_STARTING;
-	rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
-
-	/* If the device was configured more than once, some event ports and/or
-	 * queues may need to be reconfigured.
-	 */
-	ret = dlb_eventdev_reapply_configuration(dev);
-	if (ret)
-		return ret;
-
-	/* The DLB PMD delays port links until the device is started. */
-	ret = dlb_eventdev_apply_port_links(dev);
-	if (ret)
-		return ret;
-
-	cfg.response = (uintptr_t)&response;
-
-	for (i = 0; i < dlb->num_ports; i++) {
-		if (!dlb->ev_ports[i].setup_done) {
-			DLB_LOG_ERR("dlb: port %d not setup", i);
-			return -ESTALE;
-		}
-	}
-
-	for (i = 0; i < dlb->num_queues; i++) {
-		if (dlb->ev_queues[i].num_links == 0) {
-			DLB_LOG_ERR("dlb: queue %d is not linked", i);
-			return -ENOLINK;
-		}
-	}
-
-	ret = dlb_iface_sched_domain_start(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return ret;
-	}
-
-	dlb->run_state = DLB_RUN_STATE_STARTED;
-	DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
-
-	return 0;
-}
-
-static inline int
-dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
-			     struct dlb_eventdev_port *ev_port)
-{
-	uint32_t sw_inflights = __atomic_load_n(&dlb->inflights,
-						__ATOMIC_SEQ_CST);
-	const int num = 1;
-
-	if (unlikely(ev_port->inflight_max < sw_inflights)) {
-		DLB_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
-		rte_errno = -ENOSPC;
-		return 1;
-	}
-
-	if (ev_port->inflight_credits < num) {
-		/* check if event enqueue brings ev_port over max threshold */
-		uint32_t credit_update_quanta = ev_port->credit_update_quanta;
-
-		if (sw_inflights + credit_update_quanta >
-		    dlb->new_event_limit) {
-			DLB_INC_STAT(
-				ev_port->stats.traffic.tx_nospc_new_event_limit,
-				1);
-			rte_errno = -ENOSPC;
-			return 1;
-		}
-
-		__atomic_fetch_add(&dlb->inflights, credit_update_quanta,
-				   __ATOMIC_SEQ_CST);
-		ev_port->inflight_credits += (credit_update_quanta);
-
-		if (ev_port->inflight_credits < num) {
-			DLB_INC_STAT(
-			    ev_port->stats.traffic.tx_nospc_inflight_credits,
-			    1);
-			rte_errno = -ENOSPC;
-			return 1;
-		}
-	}
-
-	return 0;
-}
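
A minimal sketch of the batched software-credit scheme used above (names and types are hypothetical, and the per-port inflight_max check is omitted): ports draw credits from the device-wide inflight counter in quanta, so the atomic add is paid once per batch rather than once per NEW event.

#include <errno.h>
#include <stdint.h>

static int
acquire_one_sw_credit(uint32_t *dev_inflights, uint32_t new_event_limit,
		      uint16_t *port_credits, uint16_t quanta)
{
	if (*port_credits == 0) {
		uint32_t cur = __atomic_load_n(dev_inflights,
					       __ATOMIC_SEQ_CST);

		if (cur + quanta > new_event_limit)
			return -ENOSPC;	/* device-wide limit reached */

		__atomic_fetch_add(dev_inflights, quanta, __ATOMIC_SEQ_CST);
		*port_credits = quanta;
	}

	(*port_credits)--;	/* spend one local credit */
	return 0;
}
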
-
-static inline void
-dlb_replenish_sw_credits(struct dlb_eventdev *dlb,
-			 struct dlb_eventdev_port *ev_port)
-{
-	uint16_t quanta = ev_port->credit_update_quanta;
-
-	if (ev_port->inflight_credits >= quanta * 2) {
-		/* Replenish credits, saving one quantum for enqueues */
-		uint16_t val = ev_port->inflight_credits - quanta;
-
-		__atomic_fetch_sub(&dlb->inflights, val, __ATOMIC_SEQ_CST);
-		ev_port->inflight_credits -= val;
-	}
-}
-
-static __rte_always_inline uint16_t
-dlb_read_pc(struct process_local_port_data *port_data, bool ldb)
-{
-	volatile uint16_t *popcount;
-
-	if (ldb)
-		popcount = port_data->ldb_popcount;
-	else
-		popcount = port_data->dir_popcount;
-
-	return *popcount;
-}
-
-static inline int
-dlb_check_enqueue_hw_ldb_credits(struct dlb_port *qm_port,
-				 struct process_local_port_data *port_data)
-{
-	if (unlikely(qm_port->cached_ldb_credits == 0)) {
-		uint16_t pc;
-
-		pc = dlb_read_pc(port_data, true);
-
-		qm_port->cached_ldb_credits = pc -
-			qm_port->ldb_pushcount_at_credit_expiry;
-		if (unlikely(qm_port->cached_ldb_credits == 0)) {
-			DLB_INC_STAT(
-			qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
-			1);
-
-			DLB_LOG_DBG("ldb credits exhausted\n");
-			return 1;
-		}
-		qm_port->ldb_pushcount_at_credit_expiry +=
-			qm_port->cached_ldb_credits;
-	}
-
-	return 0;
-}
-
-static inline int
-dlb_check_enqueue_hw_dir_credits(struct dlb_port *qm_port,
-				 struct process_local_port_data *port_data)
-{
-	if (unlikely(qm_port->cached_dir_credits == 0)) {
-		uint16_t pc;
-
-		pc = dlb_read_pc(port_data, false);
-
-		qm_port->cached_dir_credits = pc -
-			qm_port->dir_pushcount_at_credit_expiry;
-
-		if (unlikely(qm_port->cached_dir_credits == 0)) {
-			DLB_INC_STAT(
-			qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
-			1);
-
-			DLB_LOG_DBG("dir credits exhausted\n");
-			return 1;
-		}
-		qm_port->dir_pushcount_at_credit_expiry +=
-			qm_port->cached_dir_credits;
-	}
-
-	return 0;
-}
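
Both credit-check helpers above rely on the same rolling-window trick, sketched here with hypothetical names (assuming the hardware exposes a free-running 16-bit pop count per port): the difference between the current pop count and the value recorded at the last refill is the number of QEs that have drained since then, and unsigned 16-bit arithmetic keeps the subtraction safe across wrap-around.

#include <stdint.h>

static inline uint16_t
refill_cached_credits(volatile const uint16_t *hw_popcount,
		      uint16_t *pushcount_at_expiry)
{
	uint16_t pc = *hw_popcount;
	uint16_t freed = pc - *pushcount_at_expiry;	/* wrap-safe */

	*pushcount_at_expiry = pc;	/* start a new credit window */
	return freed;			/* credits usable until the next refill */
}
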
-
-static inline int
-dlb_event_enqueue_prep(struct dlb_eventdev_port *ev_port,
-		       struct dlb_port *qm_port,
-		       const struct rte_event ev[],
-		       struct process_local_port_data *port_data,
-		       uint8_t *sched_type,
-		       uint8_t *queue_id)
-{
-	struct dlb_eventdev *dlb = ev_port->dlb;
-	struct dlb_eventdev_queue *ev_queue;
-	uint16_t *cached_credits = NULL;
-	struct dlb_queue *qm_queue;
-
-	ev_queue = &dlb->ev_queues[ev->queue_id];
-	qm_queue = &ev_queue->qm_queue;
-	*queue_id = qm_queue->id;
-
-	/* Ignore sched_type and hardware credits on release events */
-	if (ev->op == RTE_EVENT_OP_RELEASE)
-		goto op_check;
-
-	if (!qm_queue->is_directed) {
-		/* Load balanced destination queue */
-
-		if (dlb_check_enqueue_hw_ldb_credits(qm_port, port_data)) {
-			rte_errno = -ENOSPC;
-			return 1;
-		}
-		cached_credits = &qm_port->cached_ldb_credits;
-
-		switch (ev->sched_type) {
-		case RTE_SCHED_TYPE_ORDERED:
-			DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ORDERED\n");
-			if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
-				DLB_LOG_ERR("dlb: tried to send ordered event to unordered queue %d\n",
-					    *queue_id);
-				rte_errno = -EINVAL;
-				return 1;
-			}
-			*sched_type = DLB_SCHED_ORDERED;
-			break;
-		case RTE_SCHED_TYPE_ATOMIC:
-			DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
-			*sched_type = DLB_SCHED_ATOMIC;
-			break;
-		case RTE_SCHED_TYPE_PARALLEL:
-			DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
-			if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
-				*sched_type = DLB_SCHED_ORDERED;
-			else
-				*sched_type = DLB_SCHED_UNORDERED;
-			break;
-		default:
-			DLB_LOG_ERR("Unsupported LDB sched type in put_qe\n");
-			DLB_INC_STAT(ev_port->stats.tx_invalid, 1);
-			rte_errno = -EINVAL;
-			return 1;
-		}
-	} else {
-		/* Directed destination queue */
-
-		if (dlb_check_enqueue_hw_dir_credits(qm_port, port_data)) {
-			rte_errno = -ENOSPC;
-			return 1;
-		}
-		cached_credits = &qm_port->cached_dir_credits;
-
-		DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
-
-		*sched_type = DLB_SCHED_DIRECTED;
-	}
-
-op_check:
-	switch (ev->op) {
-	case RTE_EVENT_OP_NEW:
-		/* Check that a sw credit is available */
-		if (dlb_check_enqueue_sw_credits(dlb, ev_port)) {
-			rte_errno = -ENOSPC;
-			return 1;
-		}
-		ev_port->inflight_credits--;
-		(*cached_credits)--;
-		break;
-	case RTE_EVENT_OP_FORWARD:
-		/* Check for outstanding_releases underflow. If this occurs,
-		 * the application is not using the EVENT_OPs correctly; for
-		 * example, forwarding or releasing events that were not
-		 * dequeued.
-		 */
-		RTE_ASSERT(ev_port->outstanding_releases > 0);
-		ev_port->outstanding_releases--;
-		qm_port->issued_releases++;
-		(*cached_credits)--;
-		break;
-	case RTE_EVENT_OP_RELEASE:
-		ev_port->inflight_credits++;
-		/* Check for outstanding_releases underflow. If this occurs,
-		 * the application is not using the EVENT_OPs correctly; for
-		 * example, forwarding or releasing events that were not
-		 * dequeued.
-		 */
-		RTE_ASSERT(ev_port->outstanding_releases > 0);
-		ev_port->outstanding_releases--;
-		qm_port->issued_releases++;
-		/* Replenish s/w credits if enough are cached */
-		dlb_replenish_sw_credits(dlb, ev_port);
-		break;
-	}
-
-	DLB_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
-	DLB_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
-
-#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
-	if (ev->op != RTE_EVENT_OP_RELEASE) {
-		DLB_INC_STAT(ev_port->stats.enq_ok[ev->queue_id], 1);
-		DLB_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
-	}
-#endif
-
-	return 0;
-}
-
-static uint8_t cmd_byte_map[NUM_DLB_PORT_TYPES][DLB_NUM_HW_SCHED_TYPES] = {
-	{
-		/* Load-balanced cmd bytes */
-		[RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
-		[RTE_EVENT_OP_FORWARD] = DLB_FWD_CMD_BYTE,
-		[RTE_EVENT_OP_RELEASE] = DLB_COMP_CMD_BYTE,
-	},
-	{
-		/* Directed cmd bytes */
-		[RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
-		[RTE_EVENT_OP_FORWARD] = DLB_NEW_CMD_BYTE,
-		[RTE_EVENT_OP_RELEASE] = DLB_NOOP_CMD_BYTE,
-	},
-};
-
-static inline void
-dlb_event_build_hcws(struct dlb_port *qm_port,
-		     const struct rte_event ev[],
-		     int num,
-		     uint8_t *sched_type,
-		     uint8_t *queue_id)
-{
-	struct dlb_enqueue_qe *qe;
-	uint16_t sched_word[4];
-	__m128i sse_qe[2];
-	int i;
-
-	qe = qm_port->qe4;
-
-	sse_qe[0] = _mm_setzero_si128();
-	sse_qe[1] = _mm_setzero_si128();
-
-	switch (num) {
-	case 4:
-		/* Construct the metadata portion of two HCWs in one 128b SSE
-		 * register. HCW metadata is constructed in the SSE registers
-		 * like so:
-		 * sse_qe[0][63:0]:   qe[0]'s metadata
-		 * sse_qe[0][127:64]: qe[1]'s metadata
-		 * sse_qe[1][63:0]:   qe[2]'s metadata
-		 * sse_qe[1][127:64]: qe[3]'s metadata
-		 */
-
-		/* Convert the event operation into a command byte and store it
-		 * in the metadata:
-		 * sse_qe[0][63:56]   = cmd_byte_map[is_directed][ev[0].op]
-		 * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
-		 * sse_qe[1][63:56]   = cmd_byte_map[is_directed][ev[2].op]
-		 * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
-		 */
-#define DLB_QE_CMD_BYTE 7
-		sse_qe[0] = _mm_insert_epi8(sse_qe[0],
-				cmd_byte_map[qm_port->is_directed][ev[0].op],
-				DLB_QE_CMD_BYTE);
-		sse_qe[0] = _mm_insert_epi8(sse_qe[0],
-				cmd_byte_map[qm_port->is_directed][ev[1].op],
-				DLB_QE_CMD_BYTE + 8);
-		sse_qe[1] = _mm_insert_epi8(sse_qe[1],
-				cmd_byte_map[qm_port->is_directed][ev[2].op],
-				DLB_QE_CMD_BYTE);
-		sse_qe[1] = _mm_insert_epi8(sse_qe[1],
-				cmd_byte_map[qm_port->is_directed][ev[3].op],
-				DLB_QE_CMD_BYTE + 8);
-
-		/* Store priority, scheduling type, and queue ID in the sched
-		 * word array because these values are re-used when the
-		 * destination is a directed queue.
-		 */
-		sched_word[0] = EV_TO_DLB_PRIO(ev[0].priority) << 10 |
-				sched_type[0] << 8 |
-				queue_id[0];
-		sched_word[1] = EV_TO_DLB_PRIO(ev[1].priority) << 10 |
-				sched_type[1] << 8 |
-				queue_id[1];
-		sched_word[2] = EV_TO_DLB_PRIO(ev[2].priority) << 10 |
-				sched_type[2] << 8 |
-				queue_id[2];
-		sched_word[3] = EV_TO_DLB_PRIO(ev[3].priority) << 10 |
-				sched_type[3] << 8 |
-				queue_id[3];
-
-		/* Store the event priority, scheduling type, and queue ID in
-		 * the metadata:
-		 * sse_qe[0][31:16] = sched_word[0]
-		 * sse_qe[0][95:80] = sched_word[1]
-		 * sse_qe[1][31:16] = sched_word[2]
-		 * sse_qe[1][95:80] = sched_word[3]
-		 */
-#define DLB_QE_QID_SCHED_WORD 1
-		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
-					     sched_word[0],
-					     DLB_QE_QID_SCHED_WORD);
-		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
-					     sched_word[1],
-					     DLB_QE_QID_SCHED_WORD + 4);
-		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
-					     sched_word[2],
-					     DLB_QE_QID_SCHED_WORD);
-		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
-					     sched_word[3],
-					     DLB_QE_QID_SCHED_WORD + 4);
-
-		/* If the destination is a load-balanced queue, store the lock
-		 * ID. If it is a directed queue, DLB places this field in
-		 * bytes 10-11 of the received QE, so we format it accordingly:
-		 * sse_qe[0][47:32]  = dir queue ? sched_word[0] : flow_id[0]
-		 * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
-		 * sse_qe[1][47:32]  = dir queue ? sched_word[2] : flow_id[2]
-		 * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
-		 */
-#define DLB_QE_LOCK_ID_WORD 2
-		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
-				(sched_type[0] == DLB_SCHED_DIRECTED) ?
-					sched_word[0] : ev[0].flow_id,
-				DLB_QE_LOCK_ID_WORD);
-		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
-				(sched_type[1] == DLB_SCHED_DIRECTED) ?
-					sched_word[1] : ev[1].flow_id,
-				DLB_QE_LOCK_ID_WORD + 4);
-		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
-				(sched_type[2] == DLB_SCHED_DIRECTED) ?
-					sched_word[2] : ev[2].flow_id,
-				DLB_QE_LOCK_ID_WORD);
-		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
-				(sched_type[3] == DLB_SCHED_DIRECTED) ?
-					sched_word[3] : ev[3].flow_id,
-				DLB_QE_LOCK_ID_WORD + 4);
-
-		/* Store the event type and sub event type in the metadata:
-		 * sse_qe[0][15:0]  = flow_id[0]
-		 * sse_qe[0][79:64] = flow_id[1]
-		 * sse_qe[1][15:0]  = flow_id[2]
-		 * sse_qe[1][79:64] = flow_id[3]
-		 */
-#define DLB_QE_EV_TYPE_WORD 0
-		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
-					     ev[0].sub_event_type << 8 |
-						ev[0].event_type,
-					     DLB_QE_EV_TYPE_WORD);
-		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
-					     ev[1].sub_event_type << 8 |
-						ev[1].event_type,
-					     DLB_QE_EV_TYPE_WORD + 4);
-		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
-					     ev[2].sub_event_type << 8 |
-						ev[2].event_type,
-					     DLB_QE_EV_TYPE_WORD);
-		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
-					     ev[3].sub_event_type << 8 |
-						ev[3].event_type,
-					     DLB_QE_EV_TYPE_WORD + 4);
-
-		/* Store the metadata to memory (use the double-precision
-		 * _mm_storeh_pd because there is no integer function for
-		 * storing the upper 64b):
-		 * qe[0] metadata = sse_qe[0][63:0]
-		 * qe[1] metadata = sse_qe[0][127:64]
-		 * qe[2] metadata = sse_qe[1][63:0]
-		 * qe[3] metadata = sse_qe[1][127:64]
-		 */
-		_mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
-		_mm_storeh_pd((double *)&qe[1].u.opaque_data,
-			      (__m128d) sse_qe[0]);
-		_mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
-		_mm_storeh_pd((double *)&qe[3].u.opaque_data,
-			      (__m128d) sse_qe[1]);
-
-		qe[0].data = ev[0].u64;
-		qe[1].data = ev[1].u64;
-		qe[2].data = ev[2].u64;
-		qe[3].data = ev[3].u64;
-
-		break;
-	case 3:
-	case 2:
-	case 1:
-		for (i = 0; i < num; i++) {
-			qe[i].cmd_byte =
-				cmd_byte_map[qm_port->is_directed][ev[i].op];
-			qe[i].sched_type = sched_type[i];
-			qe[i].data = ev[i].u64;
-			qe[i].qid = queue_id[i];
-			qe[i].priority = EV_TO_DLB_PRIO(ev[i].priority);
-			qe[i].lock_id = ev[i].flow_id;
-			if (sched_type[i] == DLB_SCHED_DIRECTED) {
-				struct dlb_msg_info *info =
-					(struct dlb_msg_info *)&qe[i].lock_id;
-
-				info->qid = queue_id[i];
-				info->sched_type = DLB_SCHED_DIRECTED;
-				info->priority = qe[i].priority;
-			}
-			qe[i].u.event_type.major = ev[i].event_type;
-			qe[i].u.event_type.sub = ev[i].sub_event_type;
-		}
-		break;
-	case 0:
-		break;
-	}
-}
-
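The block above packs two QEs' 64-bit metadata words into each 128-bit register and then splits the register across two destinations with _mm_storel_epi64/_mm_storeh_pd. A minimal standalone sketch of that pack-then-split-store idiom (the constants and destinations are hypothetical, not the PMD's QE layout):

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst[2] = {0, 0};
	__m128i meta = _mm_setzero_si128();

	/* 16-bit lanes 0-3 hold entry 0's metadata, lanes 4-7 entry 1's */
	meta = _mm_insert_epi16(meta, 0x1234, 1);	/* entry 0, lane 1 */
	meta = _mm_insert_epi16(meta, 0x5678, 1 + 4);	/* entry 1, lane 5 */

	_mm_storel_epi64((__m128i *)&dst[0], meta);		/* bits 63:0   */
	_mm_storeh_pd((double *)&dst[1], (__m128d)meta);	/* bits 127:64 */

	printf("0x%016llx 0x%016llx\n", (unsigned long long)dst[0],
	       (unsigned long long)dst[1]);
	return 0;
}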
-static inline void
-dlb_construct_token_pop_qe(struct dlb_port *qm_port, int idx)
-{
-	struct dlb_cq_pop_qe *qe = (void *)qm_port->qe4;
-	int num = qm_port->owed_tokens;
-
-	if (qm_port->use_rsvd_token_scheme) {
-		/* Check if there's a deficit of reserved tokens, and return
-		 * early if there are no (unreserved) tokens to consume.
-		 */
-		if (num <= qm_port->cq_rsvd_token_deficit) {
-			qm_port->cq_rsvd_token_deficit -= num;
-			qm_port->owed_tokens = 0;
-			return;
-		}
-		num -= qm_port->cq_rsvd_token_deficit;
-		qm_port->cq_rsvd_token_deficit = 0;
-	}
-
-	qe[idx].cmd_byte = DLB_POP_CMD_BYTE;
-	qe[idx].tokens = num - 1;
-	qm_port->owed_tokens = 0;
-}
-
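For reference, a small standalone sketch of the reserved-token accounting used above: owed tokens are first netted against the reserved-token deficit, and a pop is only issued for whatever remains (the starting values are hypothetical):

#include <stdio.h>

int main(void)
{
	int owed_tokens = 3;		/* hypothetical */
	int rsvd_token_deficit = 5;	/* hypothetical */

	if (owed_tokens <= rsvd_token_deficit) {
		/* 5 - 3 = 2 left to absorb; no pop QE is issued */
		rsvd_token_deficit -= owed_tokens;
		owed_tokens = 0;
	} else {
		/* pop only what exceeds the deficit */
		owed_tokens -= rsvd_token_deficit;
		rsvd_token_deficit = 0;
	}

	printf("pop %d token(s), deficit now %d\n",
	       owed_tokens, rsvd_token_deficit);
	return 0;
}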
-static __rte_always_inline void
-dlb_pp_write(struct dlb_enqueue_qe *qe4,
-	     struct process_local_port_data *port_data)
-{
-	dlb_movdir64b(port_data->pp_addr, qe4);
-}
-
-static inline void
-dlb_hw_do_enqueue(struct dlb_port *qm_port,
-		  bool do_sfence,
-		  struct process_local_port_data *port_data)
-{
-	DLB_LOG_DBG("dlb: Flushing QE(s) to DLB\n");
-
-	/* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
-	 * application writes complete before enqueueing the release HCW.
-	 */
-	if (do_sfence)
-		rte_wmb();
-
-	dlb_pp_write(qm_port->qe4, port_data);
-}
-
-static inline int
-dlb_consume_qe_immediate(struct dlb_port *qm_port, int num)
-{
-	struct process_local_port_data *port_data;
-	struct dlb_cq_pop_qe *qe;
-
-	RTE_ASSERT(qm_port->config_state == DLB_CONFIGURED);
-
-	if (qm_port->use_rsvd_token_scheme) {
-		/* Check if there's a deficit of reserved tokens, and return
-		 * early if there are no (unreserved) tokens to consume.
-		 */
-		if (num <= qm_port->cq_rsvd_token_deficit) {
-			qm_port->cq_rsvd_token_deficit -= num;
-			qm_port->owed_tokens = 0;
-			return 0;
-		}
-		num -= qm_port->cq_rsvd_token_deficit;
-		qm_port->cq_rsvd_token_deficit = 0;
-	}
-
-	qe = qm_port->consume_qe;
-
-	qe->tokens = num - 1;
-	qe->int_arm = 0;
-
-	/* No store fence needed since no pointer is being sent, and CQ token
-	 * pops can be safely reordered with other HCWs.
-	 */
-	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
-
-	dlb_movntdq_single(port_data->pp_addr, qe);
-
-	DLB_LOG_DBG("dlb: consume immediate - %d QEs\n", num);
-
-	qm_port->owed_tokens = 0;
-
-	return 0;
-}
-
-static inline uint16_t
-__dlb_event_enqueue_burst(void *event_port,
-			  const struct rte_event events[],
-			  uint16_t num,
-			  bool use_delayed)
-{
-	struct dlb_eventdev_port *ev_port = event_port;
-	struct dlb_port *qm_port = &ev_port->qm_port;
-	struct process_local_port_data *port_data;
-	int i;
-
-	RTE_ASSERT(ev_port->enq_configured);
-	RTE_ASSERT(events != NULL);
-
-	rte_errno = 0;
-	i = 0;
-
-	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
-
-	while (i < num) {
-		uint8_t sched_types[DLB_NUM_QES_PER_CACHE_LINE];
-		uint8_t queue_ids[DLB_NUM_QES_PER_CACHE_LINE];
-		int pop_offs = 0;
-		int j = 0;
-
-		memset(qm_port->qe4,
-		       0,
-		       DLB_NUM_QES_PER_CACHE_LINE *
-		       sizeof(struct dlb_enqueue_qe));
-
-		for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
-			const struct rte_event *ev = &events[i + j];
-			int16_t thresh = qm_port->token_pop_thresh;
-
-			if (use_delayed &&
-			    qm_port->token_pop_mode == DELAYED_POP &&
-			    (ev->op == RTE_EVENT_OP_FORWARD ||
-			     ev->op == RTE_EVENT_OP_RELEASE) &&
-			    qm_port->issued_releases >= thresh - 1) {
-				/* Insert the token pop QE and break out. This
-				 * may result in a partial HCW, but that is
-				 * simpler than supporting arbitrary QE
-				 * insertion.
-				 */
-				dlb_construct_token_pop_qe(qm_port, j);
-
-				/* Reset the releases for the next QE batch */
-				qm_port->issued_releases -= thresh;
-
-				/* When using delayed token pop mode, the
-				 * initial token threshold is the full CQ
-				 * depth. After the first token pop, we need to
-				 * reset it to the dequeue_depth.
-				 */
-				qm_port->token_pop_thresh =
-					qm_port->dequeue_depth;
-
-				pop_offs = 1;
-				j++;
-				break;
-			}
-
-			if (dlb_event_enqueue_prep(ev_port, qm_port, ev,
-						   port_data, &sched_types[j],
-						   &queue_ids[j]))
-				break;
-		}
-
-		if (j == 0)
-			break;
-
-		dlb_event_build_hcws(qm_port, &events[i], j - pop_offs,
-				     sched_types, queue_ids);
-
-		dlb_hw_do_enqueue(qm_port, i == 0, port_data);
-
-		/* Don't include the token pop QE in the enqueue count */
-		i += j - pop_offs;
-
-		/* Don't interpret j < DLB_NUM_... as out-of-credits if
-		 * pop_offs != 0
-		 */
-		if (j < DLB_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
-			break;
-	}
-
-	RTE_ASSERT(!((i == 0 && rte_errno != -ENOSPC)));
-
-	return i;
-}
-
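The enqueue loop above assembles at most four 16-byte QEs into one 64-byte buffer and pushes the whole cache line at once (via MOVDIR64B in dlb_pp_write). A minimal standalone sketch of that batching shape; the structures and the memcpy-based push are stand-ins, not the driver's code:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct qe { uint64_t data; uint64_t meta; };	/* 16B stand-in for a HCW */

/* stand-in for the MOVDIR64B push of one 64B line */
static void push_cache_line(struct qe *dst, const struct qe *src)
{
	memcpy(dst, src, 4 * sizeof(*src));
}

int main(void)
{
	struct qe ring[16], qe4[4];
	uint64_t events[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
	int i = 0, num = 10, lines = 0;

	while (i < num) {
		int j;

		memset(qe4, 0, sizeof(qe4));
		for (j = 0; j < 4 && i + j < num; j++)
			qe4[j].data = events[i + j];
		push_cache_line(&ring[4 * lines++], qe4);
		i += j;
	}
	printf("flushed %d cache line(s) for %d events\n", lines, i);
	return 0;
}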
-static inline uint16_t
-dlb_event_enqueue_burst(void *event_port,
-			const struct rte_event events[],
-			uint16_t num)
-{
-	return __dlb_event_enqueue_burst(event_port, events, num, false);
-}
-
-static inline uint16_t
-dlb_event_enqueue_burst_delayed(void *event_port,
-				const struct rte_event events[],
-				uint16_t num)
-{
-	return __dlb_event_enqueue_burst(event_port, events, num, true);
-}
-
-static inline uint16_t
-dlb_event_enqueue(void *event_port,
-		  const struct rte_event events[])
-{
-	return __dlb_event_enqueue_burst(event_port, events, 1, false);
-}
-
-static inline uint16_t
-dlb_event_enqueue_delayed(void *event_port,
-			  const struct rte_event events[])
-{
-	return __dlb_event_enqueue_burst(event_port, events, 1, true);
-}
-
-static uint16_t
-dlb_event_enqueue_new_burst(void *event_port,
-			    const struct rte_event events[],
-			    uint16_t num)
-{
-	return __dlb_event_enqueue_burst(event_port, events, num, false);
-}
-
-static uint16_t
-dlb_event_enqueue_new_burst_delayed(void *event_port,
-				    const struct rte_event events[],
-				    uint16_t num)
-{
-	return __dlb_event_enqueue_burst(event_port, events, num, true);
-}
-
-static uint16_t
-dlb_event_enqueue_forward_burst(void *event_port,
-				const struct rte_event events[],
-				uint16_t num)
-{
-	return __dlb_event_enqueue_burst(event_port, events, num, false);
-}
-
-static uint16_t
-dlb_event_enqueue_forward_burst_delayed(void *event_port,
-					const struct rte_event events[],
-					uint16_t num)
-{
-	return __dlb_event_enqueue_burst(event_port, events, num, true);
-}
-
-static __rte_always_inline int
-dlb_recv_qe(struct dlb_port *qm_port, struct dlb_dequeue_qe *qe,
-	    uint8_t *offset)
-{
-	uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
-				   {0x00, 0x01, 0x03, 0x07} };
-	uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
-	volatile struct dlb_dequeue_qe *cq_addr;
-	__m128i *qes = (__m128i *)qe;
-	uint64_t *cache_line_base;
-	uint8_t gen_bits;
-
-	cq_addr = dlb_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
-	cq_addr = &cq_addr[qm_port->cq_idx];
-
-	cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
-	*offset = ((uintptr_t)cq_addr & 0x30) >> 4;
-
-	/* Load the next CQ cache line from memory. Pack these reads as tight
-	 * as possible to reduce the chance that DLB invalidates the line while
-	 * the CPU is reading it. Read the cache line backwards to ensure that
-	 * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
-	 *
-	 * (Valid QEs start at &qe[offset])
-	 */
-	qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
-	qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
-	qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
-	qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
-
-	/* Evict the cache line ASAP */
-	rte_cldemote(cache_line_base);
-
-	/* Extract and combine the gen bits */
-	gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
-		   ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
-		   ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
-		   ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
-
-	/* XOR the combined bits such that a 1 represents a valid QE */
-	gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
-
-	/* Mask off gen bits we don't care about */
-	gen_bits &= and_mask[*offset];
-
-	return __builtin_popcount(gen_bits);
-}
-
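A worked sketch of the gen-bit check above, with hypothetical values: a slot holds a new QE when its gen bit matches the port's current expectation, the XOR mask turns matching slots into 1s, and the AND mask drops slots before the current offset:

#include <stdio.h>

int main(void)
{
	const unsigned char xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
					       {0x00, 0x01, 0x03, 0x07} };
	const unsigned char and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
	unsigned char gen_bits = 0x3;	/* slots 0-1 written with gen = 1 */
	int port_gen_bit = 1;		/* hypothetical port state */
	int offset = 0;			/* CQ index points at slot 0 */

	gen_bits ^= xor_mask[port_gen_bit][offset];
	gen_bits &= and_mask[offset];

	printf("%d valid QE(s)\n", __builtin_popcount(gen_bits));
	return 0;
}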
-static inline void
-dlb_inc_cq_idx(struct dlb_port *qm_port, int cnt)
-{
-	uint16_t idx = qm_port->cq_idx_unmasked + cnt;
-
-	qm_port->cq_idx_unmasked = idx;
-	qm_port->cq_idx = idx & qm_port->cq_depth_mask;
-	qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
-}
-
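A worked example of the arithmetic above, assuming a hypothetical CQ depth of 8: the masked index wraps on the depth, while the gen bit is the inverted bit just above the mask, so it toggles on every wrap:

#include <stdio.h>

int main(void)
{
	const unsigned cq_depth_mask = 0x7;	/* depth 8 */
	const unsigned gen_bit_shift = 3;
	unsigned idx_unmasked;

	for (idx_unmasked = 0; idx_unmasked <= 17; idx_unmasked++) {
		unsigned cq_idx = idx_unmasked & cq_depth_mask;
		unsigned gen_bit = (~(idx_unmasked >> gen_bit_shift)) & 0x1;

		printf("unmasked=%2u masked=%u gen=%u\n",
		       idx_unmasked, cq_idx, gen_bit);
	}
	return 0;
}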
-static inline int
-dlb_process_dequeue_qes(struct dlb_eventdev_port *ev_port,
-			struct dlb_port *qm_port,
-			struct rte_event *events,
-			struct dlb_dequeue_qe *qes,
-			int cnt)
-{
-	uint8_t *qid_mappings = qm_port->qid_mappings;
-	int i, num;
-
-	RTE_SET_USED(ev_port);  /* avoids unused variable error */
-
-	for (i = 0, num = 0; i < cnt; i++) {
-		struct dlb_dequeue_qe *qe = &qes[i];
-		int sched_type_map[4] = {
-			[DLB_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
-			[DLB_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
-			[DLB_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
-			[DLB_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
-		};
-
-		DLB_LOG_DBG("dequeue success, data = 0x%llx, qid=%d, event_type=%d, subevent=%d\npp_id = %d, sched_type = %d, qid = %d, err=%d\n",
-			    (long long)qe->data, qe->qid,
-			    qe->u.event_type.major,
-			    qe->u.event_type.sub,
-			    qe->pp_id, qe->sched_type, qe->qid, qe->error);
-
-		/* Fill in event information.
-		 * Note that flow_id must be embedded in the data by
-		 * the app, such as the mbuf RSS hash field if the data
-		 * buffer is a mbuf.
-		 */
-		if (unlikely(qe->error)) {
-			DLB_LOG_ERR("QE error bit ON\n");
-			DLB_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
-			dlb_consume_qe_immediate(qm_port, 1);
-			continue; /* Ignore */
-		}
-
-		events[num].u64 = qe->data;
-		events[num].queue_id = qid_mappings[qe->qid];
-		events[num].priority = DLB_TO_EV_PRIO((uint8_t)qe->priority);
-		events[num].event_type = qe->u.event_type.major;
-		events[num].sub_event_type = qe->u.event_type.sub;
-		events[num].sched_type = sched_type_map[qe->sched_type];
-		DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
-		num++;
-	}
-	DLB_INC_STAT(ev_port->stats.traffic.rx_ok, num);
-
-	return num;
-}
-
-static inline int
-dlb_process_dequeue_four_qes(struct dlb_eventdev_port *ev_port,
-			     struct dlb_port *qm_port,
-			     struct rte_event *events,
-			     struct dlb_dequeue_qe *qes)
-{
-	int sched_type_map[] = {
-		[DLB_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
-		[DLB_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
-		[DLB_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
-		[DLB_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
-	};
-	const int num_events = DLB_NUM_QES_PER_CACHE_LINE;
-	uint8_t *qid_mappings = qm_port->qid_mappings;
-	__m128i sse_evt[2];
-	int i;
-
-	/* In the unlikely case that any of the QE error bits are set, process
-	 * them one at a time.
-	 */
-	if (unlikely(qes[0].error || qes[1].error ||
-		     qes[2].error || qes[3].error))
-		return dlb_process_dequeue_qes(ev_port, qm_port, events,
-					       qes, num_events);
-
-	for (i = 0; i < DLB_NUM_QES_PER_CACHE_LINE; i++) {
-		DLB_LOG_DBG("dequeue success, data = 0x%llx, qid=%d, event_type=%d, subevent=%d\npp_id = %d, sched_type = %d, qid = %d, err=%d\n",
-			    (long long)qes[i].data, qes[i].qid,
-			    qes[i].u.event_type.major,
-			    qes[i].u.event_type.sub,
-			    qes[i].pp_id, qes[i].sched_type, qes[i].qid,
-			    qes[i].error);
-	}
-
-	events[0].u64 = qes[0].data;
-	events[1].u64 = qes[1].data;
-	events[2].u64 = qes[2].data;
-	events[3].u64 = qes[3].data;
-
-	/* Construct the metadata portion of two struct rte_events
-	 * in one 128b SSE register. Event metadata is constructed in the SSE
-	 * registers like so:
-	 * sse_evt[0][63:0]:   event[0]'s metadata
-	 * sse_evt[0][127:64]: event[1]'s metadata
-	 * sse_evt[1][63:0]:   event[2]'s metadata
-	 * sse_evt[1][127:64]: event[3]'s metadata
-	 */
-	sse_evt[0] = _mm_setzero_si128();
-	sse_evt[1] = _mm_setzero_si128();
-
-	/* Convert the hardware queue ID to an event queue ID and store it in
-	 * the metadata:
-	 * sse_evt[0][47:40]   = qid_mappings[qes[0].qid]
-	 * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
-	 * sse_evt[1][47:40]   = qid_mappings[qes[2].qid]
-	 * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
-	 */
-#define DLB_EVENT_QUEUE_ID_BYTE 5
-	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
-				     qid_mappings[qes[0].qid],
-				     DLB_EVENT_QUEUE_ID_BYTE);
-	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
-				     qid_mappings[qes[1].qid],
-				     DLB_EVENT_QUEUE_ID_BYTE + 8);
-	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
-				     qid_mappings[qes[2].qid],
-				     DLB_EVENT_QUEUE_ID_BYTE);
-	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
-				     qid_mappings[qes[3].qid],
-				     DLB_EVENT_QUEUE_ID_BYTE + 8);
-
-	/* Convert the hardware priority to an event priority and store it in
-	 * the metadata:
-	 * sse_evt[0][55:48]   = DLB_TO_EV_PRIO(qes[0].priority)
-	 * sse_evt[0][119:112] = DLB_TO_EV_PRIO(qes[1].priority)
-	 * sse_evt[1][55:48]   = DLB_TO_EV_PRIO(qes[2].priority)
-	 * sse_evt[1][119:112] = DLB_TO_EV_PRIO(qes[3].priority)
-	 */
-#define DLB_EVENT_PRIO_BYTE 6
-	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
-				     DLB_TO_EV_PRIO((uint8_t)qes[0].priority),
-				     DLB_EVENT_PRIO_BYTE);
-	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
-				     DLB_TO_EV_PRIO((uint8_t)qes[1].priority),
-				     DLB_EVENT_PRIO_BYTE + 8);
-	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
-				     DLB_TO_EV_PRIO((uint8_t)qes[2].priority),
-				     DLB_EVENT_PRIO_BYTE);
-	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
-				     DLB_TO_EV_PRIO((uint8_t)qes[3].priority),
-				     DLB_EVENT_PRIO_BYTE + 8);
-
-	/* Write the event type and sub event type to the event metadata. Leave
-	 * flow ID unspecified, since the hardware does not maintain it during
-	 * scheduling:
-	 * sse_evt[0][31:0]   = qes[0].u.event_type.major << 28 |
-	 *			qes[0].u.event_type.sub << 20;
-	 * sse_evt[0][95:64]  = qes[1].u.event_type.major << 28 |
-	 *			qes[1].u.event_type.sub << 20;
-	 * sse_evt[1][31:0]   = qes[2].u.event_type.major << 28 |
-	 *			qes[2].u.event_type.sub << 20;
-	 * sse_evt[1][95:64]  = qes[3].u.event_type.major << 28 |
-	 *			qes[3].u.event_type.sub << 20;
-	 */
-#define DLB_EVENT_EV_TYPE_DW 0
-#define DLB_EVENT_EV_TYPE_SHIFT 28
-#define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
-	sse_evt[0] = _mm_insert_epi32(sse_evt[0],
-			qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
-			qes[0].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
-			DLB_EVENT_EV_TYPE_DW);
-	sse_evt[0] = _mm_insert_epi32(sse_evt[0],
-			qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
-			qes[1].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
-			DLB_EVENT_EV_TYPE_DW + 2);
-	sse_evt[1] = _mm_insert_epi32(sse_evt[1],
-			qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
-			qes[2].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
-			DLB_EVENT_EV_TYPE_DW);
-	sse_evt[1] = _mm_insert_epi32(sse_evt[1],
-			qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT  |
-			qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
-			DLB_EVENT_EV_TYPE_DW + 2);
-
-	/* Write the sched type to the event metadata. 'op' and 'rsvd' are not
-	 * set:
-	 * sse_evt[0][39:32]  = sched_type_map[qes[0].sched_type] << 6
-	 * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
-	 * sse_evt[1][39:32]  = sched_type_map[qes[2].sched_type] << 6
-	 * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
-	 */
-#define DLB_EVENT_SCHED_TYPE_BYTE 4
-#define DLB_EVENT_SCHED_TYPE_SHIFT 6
-	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
-		sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
-		DLB_EVENT_SCHED_TYPE_BYTE);
-	sse_evt[0] = _mm_insert_epi8(sse_evt[0],
-		sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
-		DLB_EVENT_SCHED_TYPE_BYTE + 8);
-	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
-		sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
-		DLB_EVENT_SCHED_TYPE_BYTE);
-	sse_evt[1] = _mm_insert_epi8(sse_evt[1],
-		sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
-		DLB_EVENT_SCHED_TYPE_BYTE + 8);
-
-	/* Store the metadata to the event (use the double-precision
-	 * _mm_storeh_pd because there is no integer function for storing the
-	 * upper 64b):
-	 * events[0].event = sse_evt[0][63:0]
-	 * events[1].event = sse_evt[0][127:64]
-	 * events[2].event = sse_evt[1][63:0]
-	 * events[3].event = sse_evt[1][127:64]
-	 */
-	_mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
-	_mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
-	_mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
-	_mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
-
-	DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
-	DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
-	DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
-	DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
-
-	DLB_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
-
-	return num_events;
-}
-
-static inline int
-dlb_dequeue_wait(struct dlb_eventdev *dlb,
-		 struct dlb_eventdev_port *ev_port,
-		 struct dlb_port *qm_port,
-		 uint64_t timeout,
-		 uint64_t start_ticks)
-{
-	struct process_local_port_data *port_data;
-	uint64_t elapsed_ticks;
-
-	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
-
-	elapsed_ticks = rte_get_timer_cycles() - start_ticks;
-
-	/* Wait/poll time expired */
-	if (elapsed_ticks >= timeout) {
-		/* Interrupts not supported by PF PMD */
-		return 1;
-	} else if (dlb->umwait_allowed) {
-		struct rte_power_monitor_cond pmc;
-		volatile struct dlb_dequeue_qe *cq_base;
-		union {
-			uint64_t raw_qe[2];
-			struct dlb_dequeue_qe qe;
-		} qe_mask;
-		uint64_t expected_value;
-		volatile uint64_t *monitor_addr;
-
-		qe_mask.qe.cq_gen = 1; /* set mask */
-
-		cq_base = port_data->cq_base;
-		monitor_addr = (volatile uint64_t *)(volatile void *)
-			&cq_base[qm_port->cq_idx];
-		monitor_addr++; /* cq_gen bit is in second 64bit location */
-
-		if (qm_port->gen_bit)
-			expected_value = qe_mask.raw_qe[1];
-		else
-			expected_value = 0;
-
-		pmc.addr = monitor_addr;
-		pmc.val = expected_value;
-		pmc.mask = qe_mask.raw_qe[1];
-		pmc.size = sizeof(uint64_t);
-
-		rte_power_monitor(&pmc, timeout + start_ticks);
-
-		DLB_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
-	} else {
-		uint64_t poll_interval = RTE_LIBRTE_PMD_DLB_POLL_INTERVAL;
-		uint64_t curr_ticks = rte_get_timer_cycles();
-		uint64_t init_ticks = curr_ticks;
-
-		while ((curr_ticks - start_ticks < timeout) &&
-		       (curr_ticks - init_ticks < poll_interval))
-			curr_ticks = rte_get_timer_cycles();
-	}
-
-	return 0;
-}
-
-static inline int16_t
-dlb_hw_dequeue(struct dlb_eventdev *dlb,
-	       struct dlb_eventdev_port *ev_port,
-	       struct rte_event *events,
-	       uint16_t max_num,
-	       uint64_t dequeue_timeout_ticks)
-{
-	uint64_t timeout;
-	uint64_t start_ticks = 0ULL;
-	struct dlb_port *qm_port;
-	int num = 0;
-
-	qm_port = &ev_port->qm_port;
-
-	/* If configured for per dequeue wait, then use wait value provided
-	 * to this API. Otherwise we must use the global
-	 * value from eventdev config time.
-	 */
-	if (!dlb->global_dequeue_wait)
-		timeout = dequeue_timeout_ticks;
-	else
-		timeout = dlb->global_dequeue_wait_ticks;
-
-	if (timeout)
-		start_ticks = rte_get_timer_cycles();
-
-	while (num < max_num) {
-		struct dlb_dequeue_qe qes[DLB_NUM_QES_PER_CACHE_LINE];
-		uint8_t offset;
-		int num_avail;
-
-		/* Copy up to 4 QEs from the current cache line into qes */
-		num_avail = dlb_recv_qe(qm_port, qes, &offset);
-
-		/* But don't process more than the user requested */
-		num_avail = RTE_MIN(num_avail, max_num - num);
-
-		dlb_inc_cq_idx(qm_port, num_avail);
-
-		if (num_avail == DLB_NUM_QES_PER_CACHE_LINE)
-			num += dlb_process_dequeue_four_qes(ev_port,
-							     qm_port,
-							     &events[num],
-							     &qes[offset]);
-		else if (num_avail)
-			num += dlb_process_dequeue_qes(ev_port,
-							qm_port,
-							&events[num],
-							&qes[offset],
-							num_avail);
-		else if ((timeout == 0) || (num > 0))
-			/* Not waiting in any form, or 1+ events received? */
-			break;
-		else if (dlb_dequeue_wait(dlb, ev_port, qm_port,
-					  timeout, start_ticks))
-			break;
-	}
-
-	qm_port->owed_tokens += num;
-
-	if (num && qm_port->token_pop_mode == AUTO_POP)
-		dlb_consume_qe_immediate(qm_port, num);
-
-	ev_port->outstanding_releases += num;
-
-	return num;
-}
-
-static __rte_always_inline int
-dlb_recv_qe_sparse(struct dlb_port *qm_port, struct dlb_dequeue_qe *qe)
-{
-	volatile struct dlb_dequeue_qe *cq_addr;
-	uint8_t xor_mask[2] = {0x0F, 0x00};
-	const uint8_t and_mask = 0x0F;
-	__m128i *qes = (__m128i *)qe;
-	uint8_t gen_bits, gen_bit;
-	uintptr_t addr[4];
-	uint16_t idx;
-
-	cq_addr = dlb_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
-
-	idx = qm_port->cq_idx;
-
-	/* Load the next 4 QEs */
-	addr[0] = (uintptr_t)&cq_addr[idx];
-	addr[1] = (uintptr_t)&cq_addr[(idx +  4) & qm_port->cq_depth_mask];
-	addr[2] = (uintptr_t)&cq_addr[(idx +  8) & qm_port->cq_depth_mask];
-	addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
-
-	/* Prefetch next batch of QEs (all CQs occupy minimum 8 cache lines) */
-	rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
-	rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
-	rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
-	rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
-
-	/* Correct the xor_mask for wrap-around QEs */
-	gen_bit = qm_port->gen_bit;
-	xor_mask[gen_bit] ^= !!((idx +  4) > qm_port->cq_depth_mask) << 1;
-	xor_mask[gen_bit] ^= !!((idx +  8) > qm_port->cq_depth_mask) << 2;
-	xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
-
-	/* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
-	 * valid, then QEs[0:N-1] are too.
-	 */
-	qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
-	rte_compiler_barrier();
-	qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
-	rte_compiler_barrier();
-	qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
-	rte_compiler_barrier();
-	qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
-
-	/* Extract and combine the gen bits */
-	gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
-		   ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
-		   ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
-		   ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
-
-	/* XOR the combined bits such that a 1 represents a valid QE */
-	gen_bits ^= xor_mask[gen_bit];
-
-	/* Mask off gen bits we don't care about */
-	gen_bits &= and_mask;
-
-	return __builtin_popcount(gen_bits);
-}
-
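A worked example of the sparse-mode wrap correction above, with a hypothetical 16-entry CQ and a current index of 8: the two QEs whose slots wrap past the end of the ring are expected to carry the opposite gen bit, so their bits are flipped in the XOR mask:

#include <stdio.h>

int main(void)
{
	unsigned char xor_mask[2] = {0x0F, 0x00};
	const unsigned cq_depth_mask = 15;	/* hypothetical 16 slots */
	const unsigned idx = 8;
	const int gen_bit = 1;

	xor_mask[gen_bit] ^= !!((idx +  4) > cq_depth_mask) << 1;
	xor_mask[gen_bit] ^= !!((idx +  8) > cq_depth_mask) << 2;	/* wraps */
	xor_mask[gen_bit] ^= !!((idx + 12) > cq_depth_mask) << 3;	/* wraps */

	printf("corrected xor mask: 0x%02x\n", (unsigned)xor_mask[gen_bit]);
	return 0;
}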
-static inline int16_t
-dlb_hw_dequeue_sparse(struct dlb_eventdev *dlb,
-		      struct dlb_eventdev_port *ev_port,
-		      struct rte_event *events,
-		      uint16_t max_num,
-		      uint64_t dequeue_timeout_ticks)
-{
-	uint64_t timeout;
-	uint64_t start_ticks = 0ULL;
-	struct dlb_port *qm_port;
-	int num = 0;
-
-	qm_port = &ev_port->qm_port;
-
-	/* If configured for per dequeue wait, then use wait value provided
-	 * to this API. Otherwise we must use the global
-	 * value from eventdev config time.
-	 */
-	if (!dlb->global_dequeue_wait)
-		timeout = dequeue_timeout_ticks;
-	else
-		timeout = dlb->global_dequeue_wait_ticks;
-
-	if (timeout)
-		start_ticks = rte_get_timer_cycles();
-
-	while (num < max_num) {
-		struct dlb_dequeue_qe qes[DLB_NUM_QES_PER_CACHE_LINE];
-		int num_avail;
-
-		/* Copy up to 4 QEs from the current cache line into qes */
-		num_avail = dlb_recv_qe_sparse(qm_port, qes);
-
-		/* But don't process more than the user requested */
-		num_avail = RTE_MIN(num_avail, max_num - num);
-
-		dlb_inc_cq_idx(qm_port, num_avail << 2);
-
-		if (num_avail == DLB_NUM_QES_PER_CACHE_LINE)
-			num += dlb_process_dequeue_four_qes(ev_port,
-							     qm_port,
-							     &events[num],
-							     &qes[0]);
-		else if (num_avail)
-			num += dlb_process_dequeue_qes(ev_port,
-							qm_port,
-							&events[num],
-							&qes[0],
-							num_avail);
-		else if ((timeout == 0) || (num > 0))
-			/* Not waiting in any form, or 1+ events received? */
-			break;
-		else if (dlb_dequeue_wait(dlb, ev_port, qm_port,
-					  timeout, start_ticks))
-			break;
-	}
-
-	qm_port->owed_tokens += num;
-
-	if (num && qm_port->token_pop_mode == AUTO_POP)
-		dlb_consume_qe_immediate(qm_port, num);
-
-	ev_port->outstanding_releases += num;
-
-	return num;
-}
-
-static int
-dlb_event_release(struct dlb_eventdev *dlb, uint8_t port_id, int n)
-{
-	struct process_local_port_data *port_data;
-	struct dlb_eventdev_port *ev_port;
-	struct dlb_port *qm_port;
-	int i;
-
-	if (port_id > dlb->num_ports) {
-		DLB_LOG_ERR("Invalid port id %d in dlb_event_release\n",
-			    port_id);
-		rte_errno = -EINVAL;
-		return rte_errno;
-	}
-
-	ev_port = &dlb->ev_ports[port_id];
-	qm_port = &ev_port->qm_port;
-	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
-
-	i = 0;
-
-	if (qm_port->is_directed) {
-		i = n;
-		goto sw_credit_update;
-	}
-
-	while (i < n) {
-		int pop_offs = 0;
-		int j = 0;
-
-		/* Zero-out QEs */
-		qm_port->qe4[0].cmd_byte = 0;
-		qm_port->qe4[1].cmd_byte = 0;
-		qm_port->qe4[2].cmd_byte = 0;
-		qm_port->qe4[3].cmd_byte = 0;
-
-		for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
-			int16_t thresh = qm_port->token_pop_thresh;
-
-			if (qm_port->token_pop_mode == DELAYED_POP &&
-			    qm_port->issued_releases >= thresh - 1) {
-				/* Insert the token pop QE */
-				dlb_construct_token_pop_qe(qm_port, j);
-
-				/* Reset the releases for the next QE batch */
-				qm_port->issued_releases -= thresh;
-
-				/* When using delayed token pop mode, the
-				 * initial token threshold is the full CQ
-				 * depth. After the first token pop, we need to
-				 * reset it to the dequeue_depth.
-				 */
-				qm_port->token_pop_thresh =
-					qm_port->dequeue_depth;
-
-				pop_offs = 1;
-				j++;
-				break;
-			}
-
-			qm_port->qe4[j].cmd_byte = DLB_COMP_CMD_BYTE;
-			qm_port->issued_releases++;
-		}
-
-		dlb_hw_do_enqueue(qm_port, i == 0, port_data);
-
-		/* Don't include the token pop QE in the release count */
-		i += j - pop_offs;
-	}
-
-sw_credit_update:
-	/* each release returns one credit */
-	if (!ev_port->outstanding_releases) {
-		DLB_LOG_ERR("Unrecoverable application error. Outstanding releases underflowed.\n");
-		rte_errno = -ENOTRECOVERABLE;
-		return rte_errno;
-	}
-
-	ev_port->outstanding_releases -= i;
-	ev_port->inflight_credits += i;
-
-	/* Replenish s/w credits if enough releases are performed */
-	dlb_replenish_sw_credits(dlb, ev_port);
-	return 0;
-}
-
-static uint16_t
-dlb_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
-			uint64_t wait)
-{
-	struct dlb_eventdev_port *ev_port = event_port;
-	struct dlb_port *qm_port = &ev_port->qm_port;
-	struct dlb_eventdev *dlb = ev_port->dlb;
-	uint16_t cnt;
-	int ret;
-
-	rte_errno = 0;
-
-	RTE_ASSERT(ev_port->setup_done);
-	RTE_ASSERT(ev != NULL);
-
-	if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
-		uint16_t out_rels = ev_port->outstanding_releases;
-
-		ret = dlb_event_release(dlb, ev_port->id, out_rels);
-		if (ret)
-			return ret;
-
-		DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
-	}
-
-	if (qm_port->token_pop_mode == DEFERRED_POP &&
-			qm_port->owed_tokens)
-		dlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);
-
-	cnt = dlb_hw_dequeue(dlb, ev_port, ev, num, wait);
-
-	DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
-	DLB_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
-	return cnt;
-}
-
-static uint16_t
-dlb_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
-{
-	return dlb_event_dequeue_burst(event_port, ev, 1, wait);
-}
-
-static uint16_t
-dlb_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
-			       uint16_t num, uint64_t wait)
-{
-	struct dlb_eventdev_port *ev_port = event_port;
-	struct dlb_port *qm_port = &ev_port->qm_port;
-	struct dlb_eventdev *dlb = ev_port->dlb;
-	uint16_t cnt;
-	int ret;
-
-	rte_errno = 0;
-
-	RTE_ASSERT(ev_port->setup_done);
-	RTE_ASSERT(ev != NULL);
-
-	if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
-		uint16_t out_rels = ev_port->outstanding_releases;
-
-		ret = dlb_event_release(dlb, ev_port->id, out_rels);
-		if (ret)
-			return ret;
-
-		DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
-	}
-
-	if (qm_port->token_pop_mode == DEFERRED_POP &&
-	    qm_port->owed_tokens)
-		dlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);
-
-	cnt = dlb_hw_dequeue_sparse(dlb, ev_port, ev, num, wait);
-
-	DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
-	DLB_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
-	return cnt;
-}
-
-static uint16_t
-dlb_event_dequeue_sparse(void *event_port, struct rte_event *ev, uint64_t wait)
-{
-	return dlb_event_dequeue_burst_sparse(event_port, ev, 1, wait);
-}
-
-static uint32_t
-dlb_get_ldb_queue_depth(struct dlb_eventdev *dlb,
-			struct dlb_eventdev_queue *queue)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_get_ldb_queue_depth_args cfg;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	cfg.queue_id = queue->qm_queue.id;
-	cfg.response = (uintptr_t)&response;
-
-	ret = dlb_iface_get_ldb_queue_depth(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: get_ldb_queue_depth ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return ret;
-	}
-
-	return response.id;
-}
-
-static uint32_t
-dlb_get_dir_queue_depth(struct dlb_eventdev *dlb,
-			struct dlb_eventdev_queue *queue)
-{
-	struct dlb_hw_dev *handle = &dlb->qm_instance;
-	struct dlb_get_dir_queue_depth_args cfg;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	cfg.queue_id = queue->qm_queue.id;
-	cfg.response = (uintptr_t)&response;
-
-	ret = dlb_iface_get_dir_queue_depth(handle, &cfg);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb: get_dir_queue_depth ret=%d (driver status: %s)\n",
-			    ret, dlb_error_strings[response.status]);
-		return ret;
-	}
-
-	return response.id;
-}
-
-uint32_t
-dlb_get_queue_depth(struct dlb_eventdev *dlb,
-		    struct dlb_eventdev_queue *queue)
-{
-	if (queue->qm_queue.is_directed)
-		return dlb_get_dir_queue_depth(dlb, queue);
-	else
-		return dlb_get_ldb_queue_depth(dlb, queue);
-}
-
-static bool
-dlb_queue_is_empty(struct dlb_eventdev *dlb,
-		   struct dlb_eventdev_queue *queue)
-{
-	return dlb_get_queue_depth(dlb, queue) == 0;
-}
-
-static bool
-dlb_linked_queues_empty(struct dlb_eventdev *dlb)
-{
-	int i;
-
-	for (i = 0; i < dlb->num_queues; i++) {
-		if (dlb->ev_queues[i].num_links == 0)
-			continue;
-		if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
-			return false;
-	}
-
-	return true;
-}
-
-static bool
-dlb_queues_empty(struct dlb_eventdev *dlb)
-{
-	int i;
-
-	for (i = 0; i < dlb->num_queues; i++) {
-		if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
-			return false;
-	}
-
-	return true;
-}
-
-static void
-dlb_flush_port(struct rte_eventdev *dev, int port_id)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	eventdev_stop_flush_t flush;
-	struct rte_event ev;
-	uint8_t dev_id;
-	void *arg;
-	int i;
-
-	flush = dev->dev_ops->dev_stop_flush;
-	dev_id = dev->data->dev_id;
-	arg = dev->data->dev_stop_flush_arg;
-
-	while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
-		if (flush)
-			flush(dev_id, ev, arg);
-
-		if (dlb->ev_ports[port_id].qm_port.is_directed)
-			continue;
-
-		ev.op = RTE_EVENT_OP_RELEASE;
-
-		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
-	}
-
-	/* Enqueue any additional outstanding releases */
-	ev.op = RTE_EVENT_OP_RELEASE;
-
-	for (i = dlb->ev_ports[port_id].outstanding_releases; i > 0; i--)
-		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
-}
-
-static void
-dlb_drain(struct rte_eventdev *dev)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	struct dlb_eventdev_port *ev_port = NULL;
-	uint8_t dev_id;
-	int i;
-
-	dev_id = dev->data->dev_id;
-
-	while (!dlb_linked_queues_empty(dlb)) {
-		/* Flush all the ev_ports, which will drain all their connected
-		 * queues.
-		 */
-		for (i = 0; i < dlb->num_ports; i++)
-			dlb_flush_port(dev, i);
-	}
-
-	/* The queues are empty, but there may be events left in the ports. */
-	for (i = 0; i < dlb->num_ports; i++)
-		dlb_flush_port(dev, i);
-
-	/* If the domain's queues are empty, we're done. */
-	if (dlb_queues_empty(dlb))
-		return;
-
-	/* Else, there must be at least one unlinked load-balanced queue.
-	 * Select a load-balanced port with which to drain the unlinked
-	 * queue(s).
-	 */
-	for (i = 0; i < dlb->num_ports; i++) {
-		ev_port = &dlb->ev_ports[i];
-
-		if (!ev_port->qm_port.is_directed)
-			break;
-	}
-
-	if (i == dlb->num_ports) {
-		DLB_LOG_ERR("internal error: no LDB ev_ports\n");
-		return;
-	}
-
-	rte_errno = 0;
-	rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
-
-	if (rte_errno) {
-		DLB_LOG_ERR("internal error: failed to unlink ev_port %d\n",
-			    ev_port->id);
-		return;
-	}
-
-	for (i = 0; i < dlb->num_queues; i++) {
-		uint8_t qid, prio;
-		int ret;
-
-		if (dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
-			continue;
-
-		qid = i;
-		prio = 0;
-
-		/* Link the ev_port to the queue */
-		ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
-		if (ret != 1) {
-			DLB_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
-				    ev_port->id, qid);
-			return;
-		}
-
-		/* Flush the queue */
-		while (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
-			dlb_flush_port(dev, ev_port->id);
-
-		/* Drain any extant events in the ev_port. */
-		dlb_flush_port(dev, ev_port->id);
-
-		/* Unlink the ev_port from the queue */
-		ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
-		if (ret != 1) {
-			DLB_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n",
-				    ev_port->id, qid);
-			return;
-		}
-	}
-}
-
-static void
-dlb_eventdev_stop(struct rte_eventdev *dev)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-
-	rte_spinlock_lock(&dlb->qm_instance.resource_lock);
-
-	if (dlb->run_state == DLB_RUN_STATE_STOPPED) {
-		DLB_LOG_DBG("Internal error: already stopped\n");
-		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
-		return;
-	} else if (dlb->run_state != DLB_RUN_STATE_STARTED) {
-		DLB_LOG_ERR("Internal error: bad state %d for dev_stop\n",
-			    (int)dlb->run_state);
-		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
-		return;
-	}
-
-	dlb->run_state = DLB_RUN_STATE_STOPPING;
-
-	rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
-
-	dlb_drain(dev);
-
-	dlb->run_state = DLB_RUN_STATE_STOPPED;
-}
-
-static int
-dlb_eventdev_close(struct rte_eventdev *dev)
-{
-	dlb_hw_reset_sched_domain(dev, false);
-
-	return 0;
-}
-
-static void
-dlb_eventdev_port_release(void *port)
-{
-	struct dlb_eventdev_port *ev_port = port;
-
-	if (ev_port) {
-		struct dlb_port *qm_port = &ev_port->qm_port;
-
-		if (qm_port->config_state == DLB_CONFIGURED)
-			dlb_free_qe_mem(qm_port);
-	}
-}
-
-static void
-dlb_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
-{
-	RTE_SET_USED(dev);
-	RTE_SET_USED(id);
-
-	/* This function intentionally left blank. */
-}
-
-static int
-dlb_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
-			   uint64_t *timeout_ticks)
-{
-	RTE_SET_USED(dev);
-	uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
-
-	*timeout_ticks = ns * cycles_per_ns;
-
-	return 0;
-}
-
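A worked example of the conversion above, assuming a hypothetical 2 GHz timer: 2e9 Hz / 1e9 gives 2 cycles per nanosecond, so a 1000 ns timeout becomes 2000 ticks (the division truncates toward zero for sub-GHz timers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t timer_hz = 2000000000ULL;	/* hypothetical timer rate */
	uint64_t cycles_per_ns = timer_hz / 1E9;
	uint64_t ns = 1000;

	printf("%llu ns -> %llu ticks\n",
	       (unsigned long long)ns,
	       (unsigned long long)(ns * cycles_per_ns));
	return 0;
}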
-void
-dlb_entry_points_init(struct rte_eventdev *dev)
-{
-	struct dlb_eventdev *dlb;
-
-	static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
-		.dev_infos_get    = dlb_eventdev_info_get,
-		.dev_configure    = dlb_eventdev_configure,
-		.dev_start        = dlb_eventdev_start,
-		.dev_stop         = dlb_eventdev_stop,
-		.dev_close        = dlb_eventdev_close,
-		.queue_def_conf   = dlb_eventdev_queue_default_conf_get,
-		.port_def_conf    = dlb_eventdev_port_default_conf_get,
-		.queue_setup      = dlb_eventdev_queue_setup,
-		.queue_release    = dlb_eventdev_queue_release,
-		.port_setup       = dlb_eventdev_port_setup,
-		.port_release     = dlb_eventdev_port_release,
-		.port_link        = dlb_eventdev_port_link,
-		.port_unlink      = dlb_eventdev_port_unlink,
-		.port_unlinks_in_progress =
-				    dlb_eventdev_port_unlinks_in_progress,
-		.timeout_ticks    = dlb_eventdev_timeout_ticks,
-		.dump             = dlb_eventdev_dump,
-		.xstats_get       = dlb_eventdev_xstats_get,
-		.xstats_get_names = dlb_eventdev_xstats_get_names,
-		.xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
-		.xstats_reset	    = dlb_eventdev_xstats_reset,
-		.dev_selftest     = test_dlb_eventdev,
-	};
-
-	/* Expose PMD's eventdev interface */
-	dev->dev_ops = &dlb_eventdev_entry_ops;
-
-	dev->enqueue = dlb_event_enqueue;
-	dev->enqueue_burst = dlb_event_enqueue_burst;
-	dev->enqueue_new_burst = dlb_event_enqueue_new_burst;
-	dev->enqueue_forward_burst = dlb_event_enqueue_forward_burst;
-	dev->dequeue = dlb_event_dequeue;
-	dev->dequeue_burst = dlb_event_dequeue_burst;
-
-	dlb = dev->data->dev_private;
-
-	if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE) {
-		dev->dequeue = dlb_event_dequeue_sparse;
-		dev->dequeue_burst = dlb_event_dequeue_burst_sparse;
-	}
-}
-
-int
-dlb_primary_eventdev_probe(struct rte_eventdev *dev,
-			   const char *name,
-			   struct dlb_devargs *dlb_args)
-{
-	struct dlb_eventdev *dlb;
-	int err, i;
-
-	dlb = dev->data->dev_private;
-
-	dlb->event_dev = dev; /* backlink */
-
-	evdev_dlb_default_info.driver_name = name;
-
-	dlb->max_num_events_override = dlb_args->max_num_events;
-	dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
-	dlb->defer_sched = dlb_args->defer_sched;
-	dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
-
-	/* Open the interface.
-	 * For vdev mode, this means open the dlb kernel module.
-	 */
-	err = dlb_iface_open(&dlb->qm_instance, name);
-	if (err < 0) {
-		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
-			    err);
-		return err;
-	}
-
-	err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
-	if (err < 0) {
-		DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
-			    err);
-		return err;
-	}
-
-	err = dlb_hw_query_resources(dlb);
-	if (err) {
-		DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
-		return err;
-	}
-
-	err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
-	if (err < 0) {
-		DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
-		return err;
-	}
-
-	/* Complete xstats runtime initialization */
-	err = dlb_xstats_init(dlb);
-	if (err) {
-		DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
-		return err;
-	}
-
-	/* Initialize each port's token pop mode */
-	for (i = 0; i < DLB_MAX_NUM_PORTS; i++)
-		dlb->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
-
-	rte_spinlock_init(&dlb->qm_instance.resource_lock);
-
-	dlb_iface_low_level_io_init(dlb);
-
-	dlb_entry_points_init(dev);
-
-	return 0;
-}
-
-int
-dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
-			     const char *name)
-{
-	struct dlb_eventdev *dlb;
-	int err;
-
-	dlb = dev->data->dev_private;
-
-	evdev_dlb_default_info.driver_name = name;
-
-	err = dlb_iface_open(&dlb->qm_instance, name);
-	if (err < 0) {
-		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
-			    err);
-		return err;
-	}
-
-	err = dlb_hw_query_resources(dlb);
-	if (err) {
-		DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
-		return err;
-	}
-
-	dlb_iface_low_level_io_init(dlb);
-
-	dlb_entry_points_init(dev);
-
-	return 0;
-}
-
-int
-dlb_parse_params(const char *params,
-		 const char *name,
-		 struct dlb_devargs *dlb_args)
-{
-	int ret = 0;
-	static const char * const args[] = { NUMA_NODE_ARG,
-					     DLB_MAX_NUM_EVENTS,
-					     DLB_NUM_DIR_CREDITS,
-					     DEV_ID_ARG,
-					     DLB_DEFER_SCHED_ARG,
-					     DLB_NUM_ATM_INFLIGHTS_ARG,
-					     NULL };
-
-	if (params && params[0] != '\0') {
-		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
-
-		if (kvlist == NULL) {
-			DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
-				     name);
-		} else {
-			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
-						     set_numa_node,
-						     &dlb_args->socket_id);
-			if (ret != 0) {
-				DLB_LOG_ERR("%s: Error parsing numa node parameter",
-					    name);
-				rte_kvargs_free(kvlist);
-				return ret;
-			}
-
-			ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
-						 set_max_num_events,
-						 &dlb_args->max_num_events);
-			if (ret != 0) {
-				DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
-					    name);
-				rte_kvargs_free(kvlist);
-				return ret;
-			}
-
-			ret = rte_kvargs_process(kvlist,
-					DLB_NUM_DIR_CREDITS,
-					set_num_dir_credits,
-					&dlb_args->num_dir_credits_override);
-			if (ret != 0) {
-				DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
-					    name);
-				rte_kvargs_free(kvlist);
-				return ret;
-			}
-
-			ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
-						 set_dev_id,
-						 &dlb_args->dev_id);
-			if (ret != 0) {
-				DLB_LOG_ERR("%s: Error parsing dev_id parameter",
-					    name);
-				rte_kvargs_free(kvlist);
-				return ret;
-			}
-
-			ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
-						 set_defer_sched,
-						 &dlb_args->defer_sched);
-			if (ret != 0) {
-				DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
-					    name);
-				rte_kvargs_free(kvlist);
-				return ret;
-			}
-
-			ret = rte_kvargs_process(kvlist,
-						 DLB_NUM_ATM_INFLIGHTS_ARG,
-						 set_num_atm_inflights,
-						 &dlb_args->num_atm_inflights);
-			if (ret != 0) {
-				DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
-					    name);
-				rte_kvargs_free(kvlist);
-				return ret;
-			}
-
-			rte_kvargs_free(kvlist);
-		}
-	}
-	return ret;
-}
-RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);
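For reference, the keys handled above can be exercised directly with the same librte_kvargs calls; a minimal standalone sketch with a made-up argument string (not taken from the driver):

#include <stdio.h>
#include <rte_kvargs.h>

static int print_arg(const char *key, const char *value, void *opaque)
{
	(void)opaque;
	printf("%s = %s\n", key, value);
	return 0;
}

int main(void)
{
	static const char * const keys[] = { "numa_node", "max_num_events",
					     "num_dir_credits", NULL };
	struct rte_kvargs *kvlist =
		rte_kvargs_parse("numa_node=0,max_num_events=2048", keys);

	if (kvlist == NULL)
		return 1;
	rte_kvargs_process(kvlist, "max_num_events", print_arg, NULL);
	rte_kvargs_free(kvlist);
	return 0;
}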
diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
deleted file mode 100644
index 44f958f5d..000000000
--- a/drivers/event/dlb/dlb_iface.c
+++ /dev/null
@@ -1,79 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <stdint.h>
-
-#include "dlb_priv.h"
-
-/* DLB PMD Internal interface function pointers.
- * If VDEV (bifurcated PMD),  these will resolve to functions that issue ioctls
- * serviced by DLB kernel module.
- * If PCI (PF PMD),  these will be implemented locally in user mode.
- */
-
-void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);
-
-int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);
-
-void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);
-
-int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
-				    uint8_t *revision);
-
-int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
-				   struct dlb_get_num_resources_args *rsrcs);
-
-int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,
-				     struct dlb_create_sched_domain_args *args);
-
-int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
-					struct dlb_create_ldb_pool_args *cfg);
-
-int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
-					struct dlb_create_dir_pool_args *cfg);
-
-int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle,
-				  struct dlb_create_dir_queue_args *cfg);
-
-int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
-				  struct dlb_create_ldb_queue_args *cfg);
-
-int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle,
-				 struct dlb_create_ldb_port_args *cfg,
-				 enum dlb_cq_poll_modes poll_mode);
-
-int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
-				 struct dlb_create_dir_port_args *cfg,
-				 enum dlb_cq_poll_modes poll_mode);
-
-int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
-			 struct dlb_map_qid_args *cfg);
-
-int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
-			   struct dlb_unmap_qid_args *cfg);
-
-int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
-				    struct dlb_start_domain_args *cfg);
-
-int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
-				     struct dlb_pending_port_unmaps_args *args);
-
-int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
-				  enum dlb_cq_poll_modes *mode);
-
-int (*dlb_iface_get_sn_allocation)(struct dlb_hw_dev *handle,
-				   struct dlb_get_sn_allocation_args *args);
-
-int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,
-				   struct dlb_set_sn_allocation_args *args);
-
-int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,
-				  struct dlb_get_sn_occupancy_args *args);
-
-int (*dlb_iface_get_ldb_queue_depth)(struct dlb_hw_dev *handle,
-				     struct dlb_get_ldb_queue_depth_args *args);
-
-int (*dlb_iface_get_dir_queue_depth)(struct dlb_hw_dev *handle,
-				     struct dlb_get_dir_queue_depth_args *args);
-
diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
deleted file mode 100644
index 9f61135ce..000000000
--- a/drivers/event/dlb/dlb_iface.h
+++ /dev/null
@@ -1,82 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef _DLB_IFACE_H
-#define _DLB_IFACE_H
-
-/* DLB PMD Internal interface function pointers.
- * If VDEV (bifurcated PMD), these will resolve to functions that issue ioctls
- * serviced by DLB kernel module.
- * If PCI (PF PMD), these will be implemented locally in user mode.
- */
-
-extern void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);
-
-extern int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);
-
-extern void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);
-
-extern int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
-					   uint8_t *revision);
-
-extern int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
-				   struct dlb_get_num_resources_args *rsrcs);
-
-extern int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,
-				     struct dlb_create_sched_domain_args *args);
-
-extern int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
-					struct dlb_create_ldb_pool_args *cfg);
-
-extern int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
-					struct dlb_create_dir_pool_args *cfg);
-
-extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
-				  struct dlb_create_ldb_queue_args *cfg);
-
-extern int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle,
-				  struct dlb_create_dir_queue_args *cfg);
-
-extern int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle,
-					struct dlb_create_ldb_port_args *cfg,
-					enum dlb_cq_poll_modes poll_mode);
-
-extern int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
-					struct dlb_create_dir_port_args *cfg,
-					enum dlb_cq_poll_modes poll_mode);
-
-extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
-				  struct dlb_create_ldb_queue_args *cfg);
-
-extern int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
-			 struct dlb_map_qid_args *cfg);
-
-extern int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
-				  struct dlb_unmap_qid_args *cfg);
-
-extern int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
-				    struct dlb_start_domain_args *cfg);
-
-extern int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
-				struct dlb_pending_port_unmaps_args *args);
-
-extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
-					 enum dlb_cq_poll_modes *mode);
-
-extern int (*dlb_iface_get_sn_allocation)(struct dlb_hw_dev *handle,
-				  struct dlb_get_sn_allocation_args *args);
-
-extern int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,
-				  struct dlb_set_sn_allocation_args *args);
-
-extern int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,
-				  struct dlb_get_sn_occupancy_args *args);
-
-extern int (*dlb_iface_get_ldb_queue_depth)(struct dlb_hw_dev *handle,
-				    struct dlb_get_ldb_queue_depth_args *args);
-
-extern int (*dlb_iface_get_dir_queue_depth)(struct dlb_hw_dev *handle,
-				    struct dlb_get_dir_queue_depth_args *args);
-
-#endif /* _DLB_IFACE_H */
diff --git a/drivers/event/dlb/dlb_inline_fns.h b/drivers/event/dlb/dlb_inline_fns.h
deleted file mode 100644
index aae94dc3c..000000000
--- a/drivers/event/dlb/dlb_inline_fns.h
+++ /dev/null
@@ -1,36 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef _DLB_INLINE_FNS_H_
-#define _DLB_INLINE_FNS_H_
-
-#include "rte_memcpy.h"
-#include "rte_io.h"
-
-/* Inline functions required in more than one source file. */
-
-static inline struct dlb_eventdev *
-dlb_pmd_priv(const struct rte_eventdev *eventdev)
-{
-	return eventdev->data->dev_private;
-}
-
-static inline void
-dlb_movntdq_single(void *dest, void *src)
-{
-	long long *_src  = (long long *)src;
-	__m128i src_data0 = (__m128i){_src[0], _src[1]};
-
-	_mm_stream_si128(dest, src_data0);
-}
-
-static inline void
-dlb_movdir64b(void *dest, void *src)
-{
-	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
-		:
-		: "a" (dest), "d" (src));
-}
-
-#endif /* _DLB_INLINE_FNS_H_ */
diff --git a/drivers/event/dlb/dlb_log.h b/drivers/event/dlb/dlb_log.h
deleted file mode 100644
index c69c9e5be..000000000
--- a/drivers/event/dlb/dlb_log.h
+++ /dev/null
@@ -1,25 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef _DLB_EVDEV_LOG_H_
-#define _DLB_EVDEV_LOG_H_
-
-extern int eventdev_dlb_log_level;
-
-/* Dynamic logging */
-#define DLB_LOG_IMPL(level, fmt, args...) \
-	rte_log(RTE_LOG_ ## level, eventdev_dlb_log_level, "%s" fmt "\n", \
-		__func__, ##args)
-
-#define DLB_LOG_INFO(fmt, args...) \
-	DLB_LOG_IMPL(INFO, fmt, ## args)
-
-#define DLB_LOG_ERR(fmt, args...) \
-	DLB_LOG_IMPL(ERR, fmt, ## args)
-
-/* remove debug logs at compile time unless actually debugging */
-#define DLB_LOG_DBG(fmt, args...) \
-	RTE_LOG_DP(DEBUG, PMD, fmt, ## args)
-
-#endif /* _DLB_EVDEV_LOG_H_ */
diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
deleted file mode 100644
index 272e17482..000000000
--- a/drivers/event/dlb/dlb_priv.h
+++ /dev/null
@@ -1,513 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef _DLB_PRIV_H_
-#define _DLB_PRIV_H_
-
-#include <emmintrin.h>
-#include <stdbool.h>
-
-#include <rte_bus_pci.h>
-#include <rte_eventdev.h>
-#include <eventdev_pmd.h>
-#include <eventdev_pmd_pci.h>
-#include <rte_pci.h>
-
-#include "dlb_user.h"
-#include "dlb_log.h"
-#include "rte_pmd_dlb.h"
-
-#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
-#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
-#else
-#define DLB_INC_STAT(_stat, _incr_val)
-#endif
-
-#define EVDEV_DLB_NAME_PMD_STR "dlb_event"
-
-/* command line arg strings */
-#define NUMA_NODE_ARG "numa_node"
-#define DLB_MAX_NUM_EVENTS "max_num_events"
-#define DLB_NUM_DIR_CREDITS "num_dir_credits"
-#define DEV_ID_ARG "dev_id"
-#define DLB_DEFER_SCHED_ARG "defer_sched"
-#define DLB_NUM_ATM_INFLIGHTS_ARG "atm_inflights"
-
-/* Begin HW related defines and structs */
-
-#define DLB_MAX_NUM_DOMAINS 32
-#define DLB_MAX_NUM_VFS 16
-#define DLB_MAX_NUM_LDB_QUEUES 128
-#define DLB_MAX_NUM_LDB_PORTS 64
-#define DLB_MAX_NUM_DIR_PORTS 128
-#define DLB_MAX_NUM_DIR_QUEUES 128
-#define DLB_MAX_NUM_FLOWS (64 * 1024)
-#define DLB_MAX_NUM_LDB_CREDITS 16384
-#define DLB_MAX_NUM_DIR_CREDITS 4096
-#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
-#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
-#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
-#define DLB_MAX_NUM_ATM_INFLIGHTS 2048
-#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
-#define DLB_QID_PRIORITIES 8
-#define DLB_MAX_DEVICE_PATH 32
-#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1
-#define DLB_NUM_SN_GROUPS 4
-#define DLB_MAX_LDB_SN_ALLOC 1024
-/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
-#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
-#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048
-
-/* 5120 total hist list entries and 64 total ldb ports, which
- * makes for 5120/64 == 80 hist list entries per port. However, CQ
- * depth must be a power of 2 and must also be >= HIST LIST entries.
- * As a result we just limit the maximum dequeue depth to 64.
- */
-#define DLB_MIN_LDB_CQ_DEPTH 1
-#define DLB_MIN_DIR_CQ_DEPTH 8
-#define DLB_MIN_HARDWARE_CQ_DEPTH 8
-#define DLB_MAX_CQ_DEPTH 64
-#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
-	DLB_MAX_CQ_DEPTH
-
-/* Static per queue/port provisioning values */
-#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16
-
-#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)
-
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-
-#define DLB_NUM_QES_PER_CACHE_LINE 4
-
-#define DLB_MAX_ENQUEUE_DEPTH 64
-#define DLB_MIN_ENQUEUE_DEPTH 4
-
-#define DLB_NAME_SIZE 64
-
-/* Use the upper 3 bits of the event priority to select the DLB priority */
-#define EV_TO_DLB_PRIO(x) ((x) >> 5)
-#define DLB_TO_EV_PRIO(x) ((x) << 5)
-
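A worked example of the two macros above: the top three bits of the 0-255 eventdev priority select one of the eight DLB priority levels, and mapping back loses the low five bits:

#include <stdio.h>
#include <stdint.h>

#define EV_TO_DLB_PRIO(x) ((x) >> 5)
#define DLB_TO_EV_PRIO(x) ((x) << 5)

int main(void)
{
	uint8_t ev_prio[] = {0, 31, 32, 128, 255};
	unsigned int i;

	for (i = 0; i < sizeof(ev_prio) / sizeof(ev_prio[0]); i++)
		printf("event prio %3u -> dlb prio %u -> back to %3u\n",
		       (unsigned)ev_prio[i],
		       (unsigned)EV_TO_DLB_PRIO(ev_prio[i]),
		       (unsigned)DLB_TO_EV_PRIO(EV_TO_DLB_PRIO(ev_prio[i])));
	return 0;
}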
-enum dlb_hw_port_type {
-	DLB_LDB,
-	DLB_DIR,
-
-	/* NUM_DLB_PORT_TYPES must be last */
-	NUM_DLB_PORT_TYPES
-};
-
-#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB)
-
-/* Do not change - must match hardware! */
-enum dlb_hw_sched_type {
-	DLB_SCHED_ATOMIC = 0,
-	DLB_SCHED_UNORDERED,
-	DLB_SCHED_ORDERED,
-	DLB_SCHED_DIRECTED,
-
-	/* DLB_NUM_HW_SCHED_TYPES must be last */
-	DLB_NUM_HW_SCHED_TYPES
-};
-
-struct dlb_devargs {
-	int socket_id;
-	int max_num_events;
-	int num_dir_credits_override;
-	int dev_id;
-	int defer_sched;
-	int num_atm_inflights;
-};
-
-struct dlb_hw_rsrcs {
-	int32_t nb_events_limit;
-	uint32_t num_queues;		/* Total queues (ldb + dir) */
-	uint32_t num_ldb_queues;	/* Number of available ldb queues */
-	uint32_t num_ldb_ports;         /* Number of load balanced ports */
-	uint32_t num_dir_ports;         /* Number of directed ports */
-	uint32_t num_ldb_credits;       /* Number of load balanced credits */
-	uint32_t num_dir_credits;       /* Number of directed credits */
-	uint32_t reorder_window_size;   /* Size of reorder window */
-};
-
-struct dlb_hw_resource_info {
-	/**> Max resources that can be provided */
-	struct dlb_hw_rsrcs hw_rsrc_max;
-	int num_sched_domains;
-	uint32_t socket_id;
-	/**> EAL flags passed to this DLB instance, allowing the application to
-	 * identify the pmd backend indicating hardware or software.
-	 */
-	const char *eal_flags;
-};
-
-/* hw-specific format - do not change */
-
-struct dlb_event_type {
-	uint8_t major:4;
-	uint8_t unused:4;
-	uint8_t sub;
-};
-
-union dlb_opaque_data {
-	uint16_t opaque_data;
-	struct dlb_event_type event_type;
-};
-
-struct dlb_msg_info {
-	uint8_t qid;
-	uint8_t sched_type:2;
-	uint8_t priority:3;
-	uint8_t msg_type:3;
-};
-
-#define DLB_NEW_CMD_BYTE 0x08
-#define DLB_FWD_CMD_BYTE 0x0A
-#define DLB_COMP_CMD_BYTE 0x02
-#define DLB_NOOP_CMD_BYTE 0x00
-#define DLB_POP_CMD_BYTE 0x01
-
-/* hw-specific format - do not change */
-struct dlb_enqueue_qe {
-	uint64_t data;
-	/* Word 3 */
-	union dlb_opaque_data u;
-	uint8_t qid;
-	uint8_t sched_type:2;
-	uint8_t priority:3;
-	uint8_t msg_type:3;
-	/* Word 4 */
-	uint16_t lock_id;
-	uint8_t meas_lat:1;
-	uint8_t rsvd1:2;
-	uint8_t no_dec:1;
-	uint8_t cmp_id:4;
-	union {
-		uint8_t cmd_byte;
-		struct {
-			uint8_t cq_token:1;
-			uint8_t qe_comp:1;
-			uint8_t qe_frag:1;
-			uint8_t qe_valid:1;
-			uint8_t int_arm:1;
-			uint8_t error:1;
-			uint8_t rsvd:2;
-		};
-	};
-};
-
-/* hw-specific format - do not change */
-struct dlb_cq_pop_qe {
-	uint64_t data;
-	union dlb_opaque_data u;
-	uint8_t qid;
-	uint8_t sched_type:2;
-	uint8_t priority:3;
-	uint8_t msg_type:3;
-	uint16_t tokens:10;
-	uint16_t rsvd2:6;
-	uint8_t meas_lat:1;
-	uint8_t rsvd1:2;
-	uint8_t no_dec:1;
-	uint8_t cmp_id:4;
-	union {
-		uint8_t cmd_byte;
-		struct {
-			uint8_t cq_token:1;
-			uint8_t qe_comp:1;
-			uint8_t qe_frag:1;
-			uint8_t qe_valid:1;
-			uint8_t int_arm:1;
-			uint8_t error:1;
-			uint8_t rsvd:2;
-		};
-	};
-};
-
-/* hw-specific format - do not change */
-struct dlb_dequeue_qe {
-	uint64_t data;
-	union dlb_opaque_data u;
-	uint8_t qid;
-	uint8_t sched_type:2;
-	uint8_t priority:3;
-	uint8_t msg_type:3;
-	uint16_t pp_id:10;
-	uint16_t rsvd0:6;
-	uint8_t debug;
-	uint8_t cq_gen:1;
-	uint8_t qid_depth:1;
-	uint8_t rsvd1:3;
-	uint8_t error:1;
-	uint8_t rsvd2:2;
-};
-
-enum dlb_port_state {
-	PORT_CLOSED,
-	PORT_STARTED,
-	PORT_STOPPED
-};
-
-enum dlb_configuration_state {
-	/* The resource has not been configured */
-	DLB_NOT_CONFIGURED,
-	/* The resource was configured, but the device was stopped */
-	DLB_PREV_CONFIGURED,
-	/* The resource is currently configured */
-	DLB_CONFIGURED
-};
-
-struct dlb_port {
-	uint32_t id;
-	bool is_directed;
-	bool gen_bit;
-	uint16_t dir_credits;
-	uint32_t dequeue_depth;
-	enum dlb_token_pop_mode token_pop_mode;
-	int pp_mmio_base;
-	uint16_t cached_ldb_credits;
-	uint16_t ldb_pushcount_at_credit_expiry;
-	uint16_t ldb_credits;
-	uint16_t cached_dir_credits;
-	uint16_t dir_pushcount_at_credit_expiry;
-	bool int_armed;
-	bool use_rsvd_token_scheme;
-	uint8_t cq_rsvd_token_deficit;
-	uint16_t owed_tokens;
-	int16_t issued_releases;
-	int16_t token_pop_thresh;
-	int cq_depth;
-	uint16_t cq_idx;
-	uint16_t cq_idx_unmasked;
-	uint16_t cq_depth_mask;
-	uint16_t gen_bit_shift;
-	enum dlb_port_state state;
-	enum dlb_configuration_state config_state;
-	int num_mapped_qids;
-	uint8_t *qid_mappings;
-	struct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
-	struct dlb_cq_pop_qe *consume_qe;
-	struct dlb_eventdev *dlb; /* back ptr */
-	struct dlb_eventdev_port *ev_port; /* back ptr */
-};
-
-/* Per-process per-port mmio and memory pointers */
-struct process_local_port_data {
-	uint64_t *pp_addr;
-	uint16_t *ldb_popcount;
-	uint16_t *dir_popcount;
-	struct dlb_dequeue_qe *cq_base;
-	const struct rte_memzone *mz;
-	bool mmaped;
-};
-
-struct dlb_config {
-	int configured;
-	int reserved;
-	uint32_t ldb_credit_pool_id;
-	uint32_t dir_credit_pool_id;
-	uint32_t num_ldb_credits;
-	uint32_t num_dir_credits;
-	struct dlb_create_sched_domain_args resources;
-};
-
-struct dlb_hw_dev {
-	struct dlb_config cfg;
-	struct dlb_hw_resource_info info;
-	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */
-	int device_id;
-	uint32_t domain_id;
-	int domain_id_valid;
-	rte_spinlock_t resource_lock; /* for MP support */
-} __rte_cache_aligned;
-
-/* End HW related defines and structs */
-
-/* Begin DLB PMD Eventdev related defines and structs */
-
-#define DLB_MAX_NUM_QUEUES \
-	(DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES)
-
-#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS)
-#define DLB_MAX_INPUT_QUEUE_DEPTH 256
-
-/** Structure to hold the queue to port link establishment attributes */
-
-struct dlb_event_queue_link {
-	uint8_t queue_id;
-	uint8_t priority;
-	bool mapped;
-	bool valid;
-};
-
-struct dlb_traffic_stats {
-	uint64_t rx_ok;
-	uint64_t rx_drop;
-	uint64_t rx_interrupt_wait;
-	uint64_t rx_umonitor_umwait;
-	uint64_t tx_ok;
-	uint64_t total_polls;
-	uint64_t zero_polls;
-	uint64_t tx_nospc_ldb_hw_credits;
-	uint64_t tx_nospc_dir_hw_credits;
-	uint64_t tx_nospc_inflight_max;
-	uint64_t tx_nospc_new_event_limit;
-	uint64_t tx_nospc_inflight_credits;
-};
-
-struct dlb_port_stats {
-	struct dlb_traffic_stats traffic;
-	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
-	uint64_t tx_implicit_rel;
-	uint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
-	uint64_t tx_invalid;
-	uint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
-	uint64_t rx_sched_invalid;
-	uint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */
-};
-
-struct dlb_eventdev_port {
-	struct dlb_port qm_port; /* hw specific data structure */
-	struct rte_event_port_conf conf; /* user-supplied configuration */
-	uint16_t inflight_credits; /* num credits this port has right now */
-	uint16_t credit_update_quanta;
-	struct dlb_eventdev *dlb; /* backlink optimization */
-	struct dlb_port_stats stats __rte_cache_aligned;
-	struct dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
-	int num_links;
-	uint32_t id;
-	/* num releases yet to be completed on this port.
-	 * Only applies to load-balanced ports.
-	 */
-	uint16_t outstanding_releases;
-	uint16_t inflight_max; /* app requested max inflights for this port */
-	/* setup_done is set when the event port is setup */
-	bool setup_done;
-	/* enq_configured is set when the qm port is created */
-	bool enq_configured;
-	uint8_t implicit_release; /* release events before dequeueing */
-} __rte_cache_aligned;
-
-struct dlb_queue {
-	uint32_t num_qid_inflights; /* User config */
-	uint32_t num_atm_inflights; /* User config */
-	enum dlb_configuration_state config_state;
-	int sched_type; /* LB queue only */
-	uint32_t id;
-	bool is_directed;
-};
-
-struct dlb_eventdev_queue {
-	struct dlb_queue qm_queue;
-	struct rte_event_queue_conf conf; /* User config */
-	uint64_t enq_ok;
-	uint32_t id;
-	bool setup_done;
-	uint8_t num_links;
-};
-
-enum dlb_run_state {
-	DLB_RUN_STATE_STOPPED = 0,
-	DLB_RUN_STATE_STOPPING,
-	DLB_RUN_STATE_STARTING,
-	DLB_RUN_STATE_STARTED
-};
-
-struct dlb_eventdev {
-	struct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS];
-	struct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES];
-	uint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
-	uint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
-
-	/* store num stats and offset of the stats for each queue */
-	uint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES];
-	uint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES];
-
-	/* store num stats and offset of the stats for each port */
-	uint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS];
-	uint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS];
-	struct dlb_get_num_resources_args hw_rsrc_query_results;
-	uint32_t xstats_count_mode_queue;
-	struct dlb_hw_dev qm_instance; /* strictly hw related */
-	uint64_t global_dequeue_wait_ticks;
-	struct dlb_xstats_entry *xstats;
-	struct rte_eventdev *event_dev; /* backlink to dev */
-	uint32_t xstats_count_mode_port;
-	uint32_t xstats_count_mode_dev;
-	uint32_t xstats_count;
-	uint32_t inflights; /* use __atomic builtins to access */
-	uint32_t new_event_limit;
-	int max_num_events_override;
-	int num_dir_credits_override;
-	volatile enum dlb_run_state run_state;
-	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
-	uint16_t num_dir_credits;
-	uint16_t num_ldb_credits;
-	uint16_t num_queues; /* total queues */
-	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
-	uint16_t num_ports; /* total num of evdev ports requested */
-	uint16_t num_ldb_ports; /* total num of ldb ports requested */
-	uint16_t num_dir_ports; /* total num of dir ports requested */
-	bool is_vdev;
-	bool umwait_allowed;
-	bool global_dequeue_wait; /* Not using per dequeue wait if true */
-	bool defer_sched;
-	unsigned int num_atm_inflights_per_queue;
-	enum dlb_cq_poll_modes poll_mode;
-	uint8_t revision;
-	bool configured;
-};
-
-/* End Eventdev related defines and structs */
-
-/* externs */
-
-extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];
-
-/* Forwards for non-inlined functions */
-
-void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);
-
-int dlb_xstats_init(struct dlb_eventdev *dlb);
-
-void dlb_xstats_uninit(struct dlb_eventdev *dlb);
-
-int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
-			    enum rte_event_dev_xstats_mode mode,
-			    uint8_t queue_port_id, const unsigned int ids[],
-			    uint64_t values[], unsigned int n);
-
-int dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
-				  enum rte_event_dev_xstats_mode mode,
-				  uint8_t queue_port_id,
-				  struct rte_event_dev_xstats_name *xstat_names,
-				  unsigned int *ids, unsigned int size);
-
-uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
-					 const char *name, unsigned int *id);
-
-int dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
-			      enum rte_event_dev_xstats_mode mode,
-			      int16_t queue_port_id,
-			      const uint32_t ids[],
-			      uint32_t nb_ids);
-
-int test_dlb_eventdev(void);
-
-int dlb_primary_eventdev_probe(struct rte_eventdev *dev,
-			       const char *name,
-			       struct dlb_devargs *dlb_args);
-
-int dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
-				 const char *name);
-
-uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb,
-			     struct dlb_eventdev_queue *queue);
-
-int dlb_parse_params(const char *params,
-		     const char *name,
-		     struct dlb_devargs *dlb_args);
-
-void dlb_entry_points_init(struct rte_eventdev *dev);
-
-#endif	/* _DLB_PRIV_H_ */
diff --git a/drivers/event/dlb/dlb_selftest.c b/drivers/event/dlb/dlb_selftest.c
deleted file mode 100644
index 8ab00ba87..000000000
--- a/drivers/event/dlb/dlb_selftest.c
+++ /dev/null
@@ -1,1544 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdint.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/queue.h>
-
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_lcore.h>
-#include <rte_debug.h>
-#include <rte_cycles.h>
-#include <rte_eventdev.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-
-#include "dlb_priv.h"
-#include "rte_pmd_dlb.h"
-
-#define MAX_PORTS 32
-#define MAX_QIDS 32
-#define DEFAULT_NUM_SEQ_NUMS 32
-
-static struct rte_mempool *eventdev_func_mempool;
-static int evdev;
-
-struct test {
-	struct rte_mempool *mbuf_pool;
-	int nb_qids;
-};
-
-/* initialization and config */
-static inline int
-init(struct test *t, int nb_queues, int nb_ports)
-{
-	struct rte_event_dev_config config = {0};
-	struct rte_event_dev_info info;
-	int ret;
-
-	memset(t, 0, sizeof(*t));
-
-	t->mbuf_pool = eventdev_func_mempool;
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		return -1;
-	}
-
-	config.nb_event_queues = nb_queues;
-	config.nb_event_ports = nb_ports;
-	config.nb_event_queue_flows = info.max_event_queue_flows;
-	config.nb_events_limit = info.max_num_events;
-	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
-	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
-	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
-	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
-
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0)
-		printf("%d: Error configuring device\n", __LINE__);
-
-	return ret;
-}
-
-static inline int
-create_ports(int num_ports)
-{
-	int i;
-
-	if (num_ports > MAX_PORTS)
-		return -1;
-
-	for (i = 0; i < num_ports; i++) {
-		struct rte_event_port_conf conf;
-
-		if (rte_event_port_default_conf_get(evdev, i, &conf)) {
-			printf("%d: Error querying default port conf\n",
-			       __LINE__);
-			return -1;
-		}
-
-		if (rte_event_port_setup(evdev, i, &conf) < 0) {
-			printf("%d: Error setting up port %d\n", __LINE__, i);
-			return -1;
-		}
-	}
-
-	return 0;
-}
-
-static inline int
-create_lb_qids(struct test *t, int num_qids, uint32_t flags)
-{
-	int i;
-
-	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
-		struct rte_event_queue_conf conf;
-
-		if (rte_event_queue_default_conf_get(evdev, i, &conf)) {
-			printf("%d: Error querying default queue conf\n",
-			       __LINE__);
-			return -1;
-		}
-
-		conf.schedule_type = flags;
-
-		if (conf.schedule_type == RTE_SCHED_TYPE_PARALLEL)
-			conf.nb_atomic_order_sequences = 0;
-		else
-			conf.nb_atomic_order_sequences = DEFAULT_NUM_SEQ_NUMS;
-
-		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
-			printf("%d: error creating qid %d\n", __LINE__, i);
-			return -1;
-		}
-	}
-
-	t->nb_qids += num_qids;
-	if (t->nb_qids > MAX_QIDS)
-		return -1;
-
-	return 0;
-}
-
-static inline int
-create_atomic_qids(struct test *t, int num_qids)
-{
-	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
-}
-
-/* destruction */
-static inline int
-cleanup(void)
-{
-	rte_event_dev_stop(evdev);
-	return rte_event_dev_close(evdev);
-}
-
-static inline int
-enqueue_timeout(uint8_t port_id, struct rte_event *ev, uint64_t tmo_us)
-{
-	const uint64_t start = rte_get_timer_cycles();
-	const uint64_t ticks = (tmo_us * rte_get_timer_hz()) / 1E6;
-
-	while ((rte_get_timer_cycles() - start) < ticks) {
-		if (rte_event_enqueue_burst(evdev, port_id, ev, 1) == 1)
-			return 0;
-
-		if (rte_errno != -ENOSPC)
-			return -1;
-	}
-
-	return -1;
-}
-
-static void
-flush(uint8_t id __rte_unused, struct rte_event event, void *arg __rte_unused)
-{
-	rte_pktmbuf_free(event.mbuf);
-}
-
-static int
-test_stop_flush(struct test *t) /* test to check we can properly flush events */
-{
-	struct rte_event ev;
-	uint32_t dequeue_depth;
-	unsigned int i, count;
-	uint8_t queue_id;
-
-	ev.op = RTE_EVENT_OP_NEW;
-
-	if (init(t, 2, 1) < 0 ||
-	    create_ports(1) < 0 ||
-	    create_atomic_qids(t, 2) < 0) {
-		printf("%d: Error initializing device\n", __LINE__);
-		return -1;
-	}
-
-	if (rte_event_port_link(evdev, 0, NULL, NULL, 0) != 2) {
-		printf("%d: Error linking queues to the port\n", __LINE__);
-		goto err;
-	}
-
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: Error with start call\n", __LINE__);
-		goto err;
-	}
-
-	/* Unlink queue 1 so the PMD's stop callback has to cleanup an unlinked
-	 * queue.
-	 */
-	queue_id = 1;
-
-	if (rte_event_port_unlink(evdev, 0, &queue_id, 1) != 1) {
-		printf("%d: Error unlinking queue 1 from port\n", __LINE__);
-		goto err;
-	}
-
-	if (t->mbuf_pool)
-		count = rte_mempool_avail_count(t->mbuf_pool);
-	else {
-		printf("%d: mbuf_pool is NULL\n", __LINE__);
-		goto err;
-	}
-
-	if (rte_event_port_attr_get(evdev,
-				    0,
-				    RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
-				    &dequeue_depth)) {
-		printf("%d: Error retrieving dequeue depth\n", __LINE__);
-		goto err;
-	}
-
-	/* Send QEs to queue 0 */
-	for (i = 0; i < dequeue_depth + 1; i++) {
-		ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
-		ev.queue_id = 0;
-		ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
-
-		if (enqueue_timeout(0, &ev, 1000)) {
-			printf("%d: Error enqueuing events\n", __LINE__);
-			goto err;
-		}
-	}
-
-	/* Send QEs to queue 1 */
-	for (i = 0; i < dequeue_depth + 1; i++) {
-		ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
-		ev.queue_id = 1;
-		ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
-
-		if (enqueue_timeout(0, &ev, 1000)) {
-			printf("%d: Error enqueuing events\n", __LINE__);
-			goto err;
-		}
-	}
-
-	/* Now the DLB is scheduling events from the port to the IQ, and at
-	 * least one event should be remaining in each queue.
-	 */
-
-	if (rte_event_dev_stop_flush_callback_register(evdev, flush, NULL)) {
-		printf("%d: Error installing the flush callback\n", __LINE__);
-		goto err;
-	}
-
-	cleanup();
-
-	if (count != rte_mempool_avail_count(t->mbuf_pool)) {
-		printf("%d: Error executing the flush callback\n", __LINE__);
-		goto err;
-	}
-
-	if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
-		printf("%d: Error uninstalling the flush callback\n", __LINE__);
-		goto err;
-	}
-
-	return 0;
-err:
-	cleanup();
-	return -1;
-}
-
-static int
-test_single_link(void)
-{
-	struct rte_event_dev_config config = {0};
-	struct rte_event_queue_conf queue_conf;
-	struct rte_event_port_conf port_conf;
-	struct rte_event_dev_info info;
-	uint8_t queue_id;
-	int ret;
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		return -1;
-	}
-
-	config.nb_event_queues = 2;
-	config.nb_event_ports = 2;
-	config.nb_single_link_event_port_queues = 1;
-	config.nb_event_queue_flows = info.max_event_queue_flows;
-	config.nb_events_limit = info.max_num_events;
-	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
-	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
-	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
-	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
-
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error configuring device\n", __LINE__);
-		return -1;
-	}
-
-	/* Create a directed port */
-	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
-		printf("%d: Error querying default port conf\n", __LINE__);
-		goto err;
-	}
-
-	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
-
-	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
-		printf("%d: port 0 setup expected to succeed\n", __LINE__);
-		goto err;
-	}
-
-	/* Attempt to create another directed port */
-	if (rte_event_port_setup(evdev, 1, &port_conf) == 0) {
-		printf("%d: port 1 setup expected to fail\n", __LINE__);
-		goto err;
-	}
-
-	port_conf.event_port_cfg = 0;
-
-	/* Create a load-balanced port */
-	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
-		printf("%d: port 1 setup expected to succeed\n", __LINE__);
-		goto err;
-	}
-
-	/* Create a directed queue */
-	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
-		printf("%d: Error querying default queue conf\n", __LINE__);
-		goto err;
-	}
-
-	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
-
-	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
-		printf("%d: queue 0 setup expected to succeed\n", __LINE__);
-		goto err;
-	}
-
-	/* Attempt to create another directed queue */
-	if (rte_event_queue_setup(evdev, 1, &queue_conf) == 0) {
-		printf("%d: queue 1 setup expected to fail\n", __LINE__);
-		goto err;
-	}
-
-	/* Create a load-balanced queue */
-	queue_conf.event_queue_cfg = 0;
-
-	if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
-		printf("%d: queue 1 setup expected to succeed\n", __LINE__);
-		goto err;
-	}
-
-	/* Attempt to link directed and load-balanced resources */
-	queue_id = 1;
-	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) == 1) {
-		printf("%d: port 0 link expected to fail\n", __LINE__);
-		goto err;
-	}
-
-	queue_id = 0;
-	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) == 1) {
-		printf("%d: port 1 link expected to fail\n", __LINE__);
-		goto err;
-	}
-
-	/* Link ports to queues */
-	queue_id = 0;
-	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
-		printf("%d: port 0 link expected to succeed\n", __LINE__);
-		goto err;
-	}
-
-	queue_id = 1;
-	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
-		printf("%d: port 1 link expected to succeed\n", __LINE__);
-		goto err;
-	}
-
-	return rte_event_dev_close(evdev);
-
-err:
-	rte_event_dev_close(evdev);
-	return -1;
-}
-
-#define NUM_LDB_PORTS 64
-#define NUM_LDB_QUEUES 128
-
-static int
-test_info_get(void)
-{
-	struct rte_event_dev_config config = {0};
-	struct rte_event_dev_info info;
-	int ret;
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		return -1;
-	}
-
-	if (info.max_event_ports != NUM_LDB_PORTS) {
-		printf("%d: Got %u ports, expected %u\n",
-		       __LINE__, info.max_event_ports, NUM_LDB_PORTS);
-		goto err;
-	}
-
-	if (info.max_event_queues != NUM_LDB_QUEUES) {
-		printf("%d: Got %u queues, expected %u\n",
-		       __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
-		goto err;
-	}
-
-	config.nb_event_ports = info.max_event_ports;
-	config.nb_event_queues = NUM_LDB_QUEUES + info.max_event_ports / 2;
-	config.nb_single_link_event_port_queues = info.max_event_ports / 2;
-	config.nb_event_queue_flows = info.max_event_queue_flows;
-	config.nb_events_limit = info.max_num_events;
-	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
-	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
-	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
-	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
-
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error configuring device\n", __LINE__);
-		return -1;
-	}
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		goto err;
-	}
-
-	/* The DLB PMD only reports load-balanced ports and queues in its
-	 * info_get function. Confirm that these values don't include the
-	 * directed port or queue counts.
-	 */
-
-	if (info.max_event_ports != NUM_LDB_PORTS) {
-		printf("%d: Got %u ports, expected %u\n",
-		       __LINE__, info.max_event_ports, NUM_LDB_PORTS);
-		goto err;
-	}
-
-	if (info.max_event_queues != NUM_LDB_QUEUES) {
-		printf("%d: Got %u queues, expected %u\n",
-		       __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
-		goto err;
-	}
-
-	ret = rte_event_dev_close(evdev);
-	if (ret) {
-		printf("rte_event_dev_close err %d\n", ret);
-		goto err;
-	}
-
-	return 0;
-
-err:
-	rte_event_dev_close(evdev);
-	return -1;
-}
-
-static int
-test_reconfiguration_link(void)
-{
-	struct rte_event_dev_config config = {0};
-	struct rte_event_queue_conf queue_conf;
-	struct rte_event_port_conf port_conf;
-	struct rte_event_dev_info info;
-	uint8_t queue_id;
-	int ret, i;
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		return -1;
-	}
-
-	config.nb_event_queues = 2;
-	config.nb_event_ports = 2;
-	config.nb_single_link_event_port_queues = 0;
-	config.nb_event_queue_flows = info.max_event_queue_flows;
-	config.nb_events_limit = info.max_num_events;
-	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
-	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
-	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
-	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
-
-	/* Configure the device with 2 LDB ports and 2 LDB queues */
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error configuring device\n", __LINE__);
-		return -1;
-	}
-
-	/* Configure the ports and queues */
-	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
-		printf("%d: Error querying default port conf\n", __LINE__);
-		goto err;
-	}
-
-	for (i = 0; i < 2; i++) {
-		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
-			printf("%d: port %d setup expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-	}
-
-	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
-		printf("%d: Error querying default queue conf\n", __LINE__);
-		goto err;
-	}
-
-	for (i = 0; i < 2; i++) {
-		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
-			printf("%d: queue %d setup expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-	}
-
-	/* Link P0->Q0 and P1->Q1 */
-	for (i = 0; i < 2; i++) {
-		queue_id = i;
-
-		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
-			printf("%d: port %d link expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	/* Stop the device */
-	rte_event_dev_stop(evdev);
-
-	/* Reconfigure device */
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error re-configuring device\n", __LINE__);
-		return -1;
-	}
-
-	/* Configure P1 and Q1, leave P0 and Q0 to be configured by the PMD. */
-	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
-		printf("%d: port 1 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
-		printf("%d: queue 1 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Link P0->Q0 and Q1 */
-	for (i = 0; i < 2; i++) {
-		queue_id = i;
-
-		if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
-			printf("%d: P0->Q%d link expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-	}
-
-	/* Link P1->Q0 and Q1 */
-	for (i = 0; i < 2; i++) {
-		queue_id = i;
-
-		if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
-			printf("%d: P1->Q%d link expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	/* Stop the device */
-	rte_event_dev_stop(evdev);
-
-	/* Configure device with 2 DIR ports and 2 DIR queues */
-	config.nb_single_link_event_port_queues = 2;
-
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error configuring device\n", __LINE__);
-		return -1;
-	}
-
-	/* Configure the ports and queues */
-	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
-
-	for (i = 0; i < 2; i++) {
-		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
-			printf("%d: port %d setup expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-	}
-
-	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
-
-	for (i = 0; i < 2; i++) {
-		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
-			printf("%d: queue %d setup expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-	}
-
-	/* Link P0->Q0 and P1->Q1 */
-	for (i = 0; i < 2; i++) {
-		queue_id = i;
-
-		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
-			printf("%d: port %d link expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	/* Stop the device */
-	rte_event_dev_stop(evdev);
-
-	/* Reconfigure device */
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error re-configuring device\n", __LINE__);
-		return -1;
-	}
-
-	/* Configure P1 and Q0, leave P0 and Q1 to be configured by the PMD. */
-	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
-		printf("%d: port 1 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
-		printf("%d: queue 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Link P0->Q1 */
-	queue_id = 1;
-
-	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
-		printf("%d: P0->Q%d link expected to succeed\n",
-		       __LINE__, queue_id);
-		goto err;
-	}
-
-	/* Link P1->Q0 */
-	queue_id = 0;
-
-	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
-		printf("%d: P1->Q%d link expected to succeed\n",
-		       __LINE__, queue_id);
-		goto err;
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	rte_event_dev_stop(evdev);
-
-	config.nb_event_queues = 5;
-	config.nb_event_ports = 5;
-	config.nb_single_link_event_port_queues = 1;
-
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error re-configuring device\n", __LINE__);
-		return -1;
-	}
-
-	for (i = 0; i < config.nb_event_queues - 1; i++) {
-		port_conf.event_port_cfg = 0;
-		queue_conf.event_queue_cfg = 0;
-
-		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
-			printf("%d: port %d setup expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-
-		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
-			printf("%d: queue %d setup expected to succeed\n",
-			       __LINE__, i);
-			goto err;
-		}
-
-		queue_id = i;
-
-		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
-			printf("%d: P%d->Q%d link expected to succeed\n",
-			       __LINE__, i, i);
-			goto err;
-		}
-	}
-
-	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
-	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
-
-	if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
-		printf("%d: port %d setup expected to succeed\n",
-		       __LINE__, i);
-		goto err;
-	}
-
-	if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
-		printf("%d: queue %d setup expected to succeed\n",
-		       __LINE__, i);
-		goto err;
-	}
-
-	queue_id = i;
-
-	if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
-		printf("%d: P%d->Q%d link expected to succeed\n",
-		       __LINE__, i, i);
-		goto err;
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	/* Stop the device */
-	rte_event_dev_stop(evdev);
-
-	config.nb_event_ports += 1;
-
-	/* Reconfigure device with 1 more load-balanced port */
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error re-configuring device\n", __LINE__);
-		return -1;
-	}
-
-	port_conf.event_port_cfg = 0;
-
-	/* Configure the new port */
-	if (rte_event_port_setup(evdev, config.nb_event_ports - 1,
-				 &port_conf) < 0) {
-		printf("%d: port %d setup expected to succeed\n",
-		       __LINE__, config.nb_event_ports - 1);
-		goto err;
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	cleanup();
-	return 0;
-
-err:
-	cleanup();
-	return -1;
-}
-
-static int
-test_load_balanced_traffic(void)
-{
-	uint64_t timeout;
-	struct rte_event_dev_config config = {0};
-	struct rte_event_queue_conf queue_conf;
-	struct rte_event_port_conf port_conf;
-	struct rte_event_dev_info info;
-	struct rte_event ev;
-	uint8_t queue_id;
-	int ret;
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		return -1;
-	}
-
-	config.nb_event_queues = 1;
-	config.nb_event_ports = 1;
-	config.nb_single_link_event_port_queues = 0;
-	config.nb_event_queue_flows = info.max_event_queue_flows;
-	config.nb_events_limit = info.max_num_events;
-	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
-	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
-	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
-	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
-
-	/* Configure the device with 1 LDB port and queue */
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error configuring device\n", __LINE__);
-		return -1;
-	}
-
-	/* Configure the ports and queues */
-	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
-		printf("%d: Error querying default port conf\n", __LINE__);
-		goto err;
-	}
-
-	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
-		printf("%d: port 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
-		printf("%d: Error querying default queue conf\n", __LINE__);
-		goto err;
-	}
-
-	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
-		printf("%d: queue 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Link P0->Q0 */
-	queue_id = 0;
-
-	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
-		printf("%d: port 0 link expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	/* Enqueue 1 NEW event */
-	ev.op = RTE_EVENT_OP_NEW;
-	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
-	ev.queue_id = 0;
-	ev.priority = 0;
-	ev.u64 = 0;
-
-	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-		printf("%d: NEW enqueue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Dequeue and enqueue 1 FORWARD event */
-	timeout = 0xFFFFFFFFF;
-	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-		printf("%d: event dequeue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	ev.op = RTE_EVENT_OP_FORWARD;
-
-	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-		printf("%d: FORWARD enqueue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Dequeue and enqueue 1 RELEASE operation */
-	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-		printf("%d: event dequeue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	ev.op = RTE_EVENT_OP_RELEASE;
-
-	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-		printf("%d: RELEASE enqueue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	cleanup();
-	return 0;
-
-err:
-	cleanup();
-	return -1;
-}
-
-static int
-test_directed_traffic(void)
-{
-	uint64_t timeout;
-	struct rte_event_dev_config config = {0};
-	struct rte_event_queue_conf queue_conf;
-	struct rte_event_port_conf port_conf;
-	struct rte_event_dev_info info;
-	struct rte_event ev;
-	uint8_t queue_id;
-	int ret;
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		return -1;
-	}
-
-	config.nb_event_queues = 1;
-	config.nb_event_ports = 1;
-	config.nb_single_link_event_port_queues = 1;
-	config.nb_event_queue_flows = info.max_event_queue_flows;
-	config.nb_events_limit = info.max_num_events;
-	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
-	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
-	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
-	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
-
-	/* Configure the device with 1 DIR port and queue */
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error configuring device\n", __LINE__);
-		return -1;
-	}
-
-	/* Configure the ports and queues */
-	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
-		printf("%d: Error querying default port conf\n", __LINE__);
-		goto err;
-	}
-
-	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
-
-	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
-		printf("%d: port 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
-		printf("%d: Error querying default queue conf\n", __LINE__);
-		goto err;
-	}
-
-	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
-
-	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
-		printf("%d: queue 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Link P0->Q0 */
-	queue_id = 0;
-
-	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
-		printf("%d: port 0 link expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	/* Enqueue 1 NEW event */
-	ev.op = RTE_EVENT_OP_NEW;
-	ev.queue_id = 0;
-	ev.priority = 0;
-	ev.u64 = 0;
-
-	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-		printf("%d: NEW enqueue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Dequeue and enqueue 1 FORWARD event */
-	timeout = 0xFFFFFFFFF;
-	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-		printf("%d: event dequeue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (ev.queue_id != 0) {
-		printf("%d: invalid dequeued event queue ID (%d)\n",
-		       __LINE__, ev.queue_id);
-		goto err;
-	}
-
-	ev.op = RTE_EVENT_OP_FORWARD;
-
-	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-		printf("%d: FORWARD enqueue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Dequeue and enqueue 1 RELEASE operation */
-	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-		printf("%d: event dequeue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	ev.op = RTE_EVENT_OP_RELEASE;
-
-	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-		printf("%d: RELEASE enqueue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	cleanup();
-	return 0;
-
-err:
-	cleanup();
-	return -1;
-}
-
-static int
-test_deferred_sched(void)
-{
-	uint64_t timeout;
-	struct rte_event_dev_config config = {0};
-	struct rte_event_queue_conf queue_conf;
-	struct rte_event_port_conf port_conf;
-	struct rte_event_dev_info info;
-	const int num_events = 128;
-	struct rte_event ev;
-	uint8_t queue_id;
-	int ret, i;
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		return -1;
-	}
-
-	config.nb_event_queues = 1;
-	config.nb_event_ports = 2;
-	config.nb_single_link_event_port_queues = 0;
-	config.nb_event_queue_flows = info.max_event_queue_flows;
-	config.nb_events_limit = info.max_num_events;
-	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
-	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
-	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
-	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
-
-	/* Configure the device with 2 LDB ports and 1 queue */
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error configuring device\n", __LINE__);
-		return -1;
-	}
-
-	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DEFERRED_POP);
-	if (ret < 0) {
-		printf("%d: Error setting deferred scheduling\n", __LINE__);
-		goto err;
-	}
-
-	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 1, DEFERRED_POP);
-	if (ret < 0) {
-		printf("%d: Error setting deferred scheduling\n", __LINE__);
-		goto err;
-	}
-
-	/* Configure the ports and queues */
-	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
-		printf("%d: Error querying default port conf\n", __LINE__);
-		goto err;
-	}
-
-	port_conf.dequeue_depth = 1;
-
-	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
-		printf("%d: port 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
-		printf("%d: port 1 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
-		printf("%d: Error querying default queue conf\n", __LINE__);
-		goto err;
-	}
-
-	queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
-	queue_conf.nb_atomic_order_sequences = 0;
-
-	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
-		printf("%d: queue 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Link P0->Q0 and P1->Q0 */
-	queue_id = 0;
-
-	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
-		printf("%d: port 0 link expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
-		printf("%d: port 1 link expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	/* Enqueue 128 NEW events */
-	ev.op = RTE_EVENT_OP_NEW;
-	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
-	ev.queue_id = 0;
-	ev.priority = 0;
-	ev.u64 = 0;
-
-	for (i = 0; i < num_events; i++) {
-		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-			printf("%d: NEW enqueue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	/* Dequeue two events from port 0 (dequeue_depth * 2 due to the
-	 * reserved token scheme)
-	 */
-	timeout = 0xFFFFFFFFF;
-	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-		printf("%d: event dequeue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-		printf("%d: event dequeue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Dequeue (and release) all other events from port 1. Deferred
-	 * scheduling ensures no other events are scheduled to port 0 without a
-	 * subsequent rte_event_dequeue_burst() call.
-	 */
-	for (i = 0; i < num_events - 2; i++) {
-		if (rte_event_dequeue_burst(evdev, 1, &ev, 1, timeout) != 1) {
-			printf("%d: event dequeue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-
-		ev.op = RTE_EVENT_OP_RELEASE;
-
-		if (rte_event_enqueue_burst(evdev, 1, &ev, 1) != 1) {
-			printf("%d: RELEASE enqueue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	cleanup();
-	return 0;
-
-err:
-	cleanup();
-	return -1;
-}
-
-static int
-test_delayed_pop(void)
-{
-	uint64_t timeout;
-	struct rte_event_dev_config config = {0};
-	struct rte_event_queue_conf queue_conf;
-	struct rte_event_port_conf port_conf;
-	struct rte_event_dev_info info;
-	int ret, i, num_events;
-	struct rte_event ev;
-	uint8_t queue_id;
-
-	if (rte_event_dev_info_get(evdev, &info)) {
-		printf("%d: Error querying device info\n", __LINE__);
-		return -1;
-	}
-
-	config.nb_event_queues = 1;
-	config.nb_event_ports = 1;
-	config.nb_single_link_event_port_queues = 0;
-	config.nb_event_queue_flows = info.max_event_queue_flows;
-	config.nb_events_limit = info.max_num_events;
-	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
-	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
-	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
-	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
-
-	/* Configure the device with 1 LDB port and queue */
-	ret = rte_event_dev_configure(evdev, &config);
-	if (ret < 0) {
-		printf("%d: Error configuring device\n", __LINE__);
-		return -1;
-	}
-
-	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DELAYED_POP);
-	if (ret < 0) {
-		printf("%d: Error setting delayed token pop mode\n", __LINE__);
-		goto err;
-	}
-
-	/* Configure the ports and queues */
-	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
-		printf("%d: Error querying default port conf\n", __LINE__);
-		goto err;
-	}
-
-	port_conf.dequeue_depth = 16;
-	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
-
-	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
-		printf("%d: port 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
-		printf("%d: Error querying default queue conf\n", __LINE__);
-		goto err;
-	}
-
-	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
-		printf("%d: queue 0 setup expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Link P0->Q0 */
-	queue_id = 0;
-
-	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
-		printf("%d: port 0 link expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	/* Start the device */
-	if (rte_event_dev_start(evdev) < 0) {
-		printf("%d: device start failed\n", __LINE__);
-		goto err;
-	}
-
-	num_events = 2 * port_conf.dequeue_depth;
-
-	/* Enqueue 2 * dequeue_depth NEW events. Due to the PMD's reserved
-	 * token scheme, the port will initially behave as though its
-	 * dequeue_depth is twice the requested size.
-	 */
-	ev.op = RTE_EVENT_OP_NEW;
-	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
-	ev.queue_id = 0;
-	ev.priority = 0;
-	ev.u64 = 0;
-
-	for (i = 0; i < num_events; i++) {
-		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-			printf("%d: NEW enqueue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	/* Flush these events out of the CQ */
-	timeout = 0xFFFFFFFFF;
-
-	for (i = 0; i < num_events; i++) {
-		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-			printf("%d: event dequeue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	ev.op = RTE_EVENT_OP_RELEASE;
-
-	for (i = 0; i < num_events; i++) {
-		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-			printf("%d: RELEASE enqueue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	/* Enqueue 2 * dequeue_depth NEW events again */
-	ev.op = RTE_EVENT_OP_NEW;
-	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
-	ev.queue_id = 0;
-	ev.priority = 0;
-	ev.u64 = 0;
-
-	for (i = 0; i < num_events; i++) {
-		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-			printf("%d: NEW enqueue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	/* Dequeue dequeue_depth events but only release dequeue_depth - 1.
-	 * Delayed pop won't perform the pop and no more events will be
-	 * scheduled.
-	 */
-	for (i = 0; i < port_conf.dequeue_depth; i++) {
-		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-			printf("%d: event dequeue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	ev.op = RTE_EVENT_OP_RELEASE;
-
-	for (i = 0; i < port_conf.dequeue_depth - 1; i++) {
-		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-			printf("%d: RELEASE enqueue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	timeout = 0x10000;
-
-	ret = rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout);
-	if (ret != 0) {
-		printf("%d: event dequeue expected to fail (ret = %d)\n",
-		       __LINE__, ret);
-		goto err;
-	}
-
-	/* Release one more event. This will trigger the token pop, and
-	 * another batch of events will be scheduled to the device.
-	 */
-	ev.op = RTE_EVENT_OP_RELEASE;
-
-	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
-		printf("%d: RELEASE enqueue expected to succeed\n",
-		       __LINE__);
-		goto err;
-	}
-
-	timeout = 0xFFFFFFFFF;
-
-	for (i = 0; i < port_conf.dequeue_depth; i++) {
-		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
-			printf("%d: event dequeue expected to succeed\n",
-			       __LINE__);
-			goto err;
-		}
-	}
-
-	cleanup();
-	return 0;
-
-err:
-	cleanup();
-	return -1;
-}
-
-static int
-do_selftest(void)
-{
-	struct test t;
-	int ret;
-
-	/* Only create mbuf pool once, reuse for each test run */
-	if (!eventdev_func_mempool) {
-		eventdev_func_mempool =
-			rte_pktmbuf_pool_create("EVENTDEV_DLB_SA_MBUF_POOL",
-						(1 << 12), /* 4k buffers */
-						32 /*MBUF_CACHE_SIZE*/,
-						0,
-						512, /* use very small mbufs */
-						rte_socket_id());
-		if (!eventdev_func_mempool) {
-			printf("ERROR creating mempool\n");
-			goto test_fail;
-		}
-	}
-	t.mbuf_pool = eventdev_func_mempool;
-
-	printf("*** Running Stop Flush test...\n");
-	ret = test_stop_flush(&t);
-	if (ret != 0) {
-		printf("ERROR - Stop Flush test FAILED.\n");
-		return ret;
-	}
-
-	printf("*** Running Single Link test...\n");
-	ret = test_single_link();
-	if (ret != 0) {
-		printf("ERROR - Single Link test FAILED.\n");
-
-		goto test_fail;
-	}
-
-	printf("*** Running Info Get test...\n");
-	ret = test_info_get();
-	if (ret != 0) {
-		printf("ERROR - Info Get test FAILED.\n");
-		return ret;
-	}
-
-	printf("*** Running Reconfiguration Link test...\n");
-	ret = test_reconfiguration_link();
-	if (ret != 0) {
-		printf("ERROR - Reconfiguration Link test FAILED.\n");
-
-		goto test_fail;
-	}
-
-	printf("*** Running Load-Balanced Traffic test...\n");
-	ret = test_load_balanced_traffic();
-	if (ret != 0) {
-		printf("ERROR - Load-Balanced Traffic test FAILED.\n");
-
-		goto test_fail;
-	}
-
-	printf("*** Running Directed Traffic test...\n");
-	ret = test_directed_traffic();
-	if (ret != 0) {
-		printf("ERROR - Directed Traffic test FAILED.\n");
-
-		goto test_fail;
-	}
-
-	printf("*** Running Deferred Scheduling test...\n");
-	ret = test_deferred_sched();
-	if (ret != 0) {
-		printf("ERROR - Deferred Scheduling test FAILED.\n");
-
-		goto test_fail;
-	}
-
-	printf("*** Running Delayed Pop test...\n");
-	ret = test_delayed_pop();
-	if (ret != 0) {
-		printf("ERROR - Delayed Pop test FAILED.\n");
-
-		goto test_fail;
-	}
-
-	return 0;
-
-test_fail:
-	return -1;
-}
-
-int
-test_dlb_eventdev(void)
-{
-	const char *dlb_eventdev_name = "dlb_event";
-	uint8_t num_evdevs = rte_event_dev_count();
-	int i, ret = 0;
-	int found = 0, skipped = 0, passed = 0, failed = 0;
-	struct rte_event_dev_info info;
-
-	for (i = 0; found + skipped < num_evdevs && i < RTE_EVENT_MAX_DEVS;
-	     i++) {
-		ret = rte_event_dev_info_get(i, &info);
-		if (ret < 0)
-			continue;
-
-		/* skip non-dlb event devices */
-		if (strncmp(info.driver_name, dlb_eventdev_name,
-			    strlen(dlb_eventdev_name)) != 0) {
-			skipped++;
-			continue;
-		}
-
-		evdev = rte_event_dev_get_dev_id(info.driver_name);
-		if (evdev < 0) {
-			printf("Could not get dev_id for eventdev with name %s, i=%d\n",
-			       info.driver_name, i);
-			skipped++;
-			continue;
-		}
-		found++;
-		printf("Running selftest on eventdev %s\n", info.driver_name);
-		ret = do_selftest();
-		if (ret == 0) {
-			passed++;
-			printf("Selftest passed for eventdev %s\n",
-			       info.driver_name);
-		} else {
-			failed++;
-			printf("Selftest failed for eventdev %s, err=%d\n",
-			       info.driver_name, ret);
-		}
-	}
-
-	printf("Ran selftest on %d eventdevs, %d skipped, %d passed, %d failed\n",
-	       found, skipped, passed, failed);
-	return ret;
-}
diff --git a/drivers/event/dlb/dlb_user.h b/drivers/event/dlb/dlb_user.h
deleted file mode 100644
index 2d9582b2b..000000000
--- a/drivers/event/dlb/dlb_user.h
+++ /dev/null
@@ -1,814 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_USER_H
-#define __DLB_USER_H
-
-#include <linux/types.h>
-
-#define DLB_MAX_NAME_LEN 64
-
-enum dlb_error {
-	DLB_ST_SUCCESS = 0,
-	DLB_ST_NAME_EXISTS,
-	DLB_ST_DOMAIN_UNAVAILABLE,
-	DLB_ST_LDB_PORTS_UNAVAILABLE,
-	DLB_ST_DIR_PORTS_UNAVAILABLE,
-	DLB_ST_LDB_QUEUES_UNAVAILABLE,
-	DLB_ST_LDB_CREDITS_UNAVAILABLE,
-	DLB_ST_DIR_CREDITS_UNAVAILABLE,
-	DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE,
-	DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE,
-	DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE,
-	DLB_ST_INVALID_DOMAIN_ID,
-	DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION,
-	DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE,
-	DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE,
-	DLB_ST_INVALID_LDB_CREDIT_POOL_ID,
-	DLB_ST_INVALID_DIR_CREDIT_POOL_ID,
-	DLB_ST_INVALID_POP_COUNT_VIRT_ADDR,
-	DLB_ST_INVALID_LDB_QUEUE_ID,
-	DLB_ST_INVALID_CQ_DEPTH,
-	DLB_ST_INVALID_CQ_VIRT_ADDR,
-	DLB_ST_INVALID_PORT_ID,
-	DLB_ST_INVALID_QID,
-	DLB_ST_INVALID_PRIORITY,
-	DLB_ST_NO_QID_SLOTS_AVAILABLE,
-	DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE,
-	DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE,
-	DLB_ST_INVALID_DIR_QUEUE_ID,
-	DLB_ST_DIR_QUEUES_UNAVAILABLE,
-	DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK,
-	DLB_ST_INVALID_LDB_CREDIT_QUANTUM,
-	DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK,
-	DLB_ST_INVALID_DIR_CREDIT_QUANTUM,
-	DLB_ST_DOMAIN_NOT_CONFIGURED,
-	DLB_ST_PID_ALREADY_ATTACHED,
-	DLB_ST_PID_NOT_ATTACHED,
-	DLB_ST_INTERNAL_ERROR,
-	DLB_ST_DOMAIN_IN_USE,
-	DLB_ST_IOMMU_MAPPING_ERROR,
-	DLB_ST_FAIL_TO_PIN_MEMORY_PAGE,
-	DLB_ST_UNABLE_TO_PIN_POPCOUNT_PAGES,
-	DLB_ST_UNABLE_TO_PIN_CQ_PAGES,
-	DLB_ST_DISCONTIGUOUS_CQ_MEMORY,
-	DLB_ST_DISCONTIGUOUS_POP_COUNT_MEMORY,
-	DLB_ST_DOMAIN_STARTED,
-	DLB_ST_LARGE_POOL_NOT_SPECIFIED,
-	DLB_ST_SMALL_POOL_NOT_SPECIFIED,
-	DLB_ST_NEITHER_POOL_SPECIFIED,
-	DLB_ST_DOMAIN_NOT_STARTED,
-	DLB_ST_INVALID_MEASUREMENT_DURATION,
-	DLB_ST_INVALID_PERF_METRIC_GROUP_ID,
-	DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES,
-	DLB_ST_DOMAIN_RESET_FAILED,
-	DLB_ST_MBOX_ERROR,
-	DLB_ST_INVALID_HIST_LIST_DEPTH,
-	DLB_ST_NO_MEMORY,
-};
-
-static const char dlb_error_strings[][128] = {
-	"DLB_ST_SUCCESS",
-	"DLB_ST_NAME_EXISTS",
-	"DLB_ST_DOMAIN_UNAVAILABLE",
-	"DLB_ST_LDB_PORTS_UNAVAILABLE",
-	"DLB_ST_DIR_PORTS_UNAVAILABLE",
-	"DLB_ST_LDB_QUEUES_UNAVAILABLE",
-	"DLB_ST_LDB_CREDITS_UNAVAILABLE",
-	"DLB_ST_DIR_CREDITS_UNAVAILABLE",
-	"DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE",
-	"DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE",
-	"DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE",
-	"DLB_ST_INVALID_DOMAIN_ID",
-	"DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION",
-	"DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE",
-	"DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE",
-	"DLB_ST_INVALID_LDB_CREDIT_POOL_ID",
-	"DLB_ST_INVALID_DIR_CREDIT_POOL_ID",
-	"DLB_ST_INVALID_POP_COUNT_VIRT_ADDR",
-	"DLB_ST_INVALID_LDB_QUEUE_ID",
-	"DLB_ST_INVALID_CQ_DEPTH",
-	"DLB_ST_INVALID_CQ_VIRT_ADDR",
-	"DLB_ST_INVALID_PORT_ID",
-	"DLB_ST_INVALID_QID",
-	"DLB_ST_INVALID_PRIORITY",
-	"DLB_ST_NO_QID_SLOTS_AVAILABLE",
-	"DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE",
-	"DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE",
-	"DLB_ST_INVALID_DIR_QUEUE_ID",
-	"DLB_ST_DIR_QUEUES_UNAVAILABLE",
-	"DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK",
-	"DLB_ST_INVALID_LDB_CREDIT_QUANTUM",
-	"DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK",
-	"DLB_ST_INVALID_DIR_CREDIT_QUANTUM",
-	"DLB_ST_DOMAIN_NOT_CONFIGURED",
-	"DLB_ST_PID_ALREADY_ATTACHED",
-	"DLB_ST_PID_NOT_ATTACHED",
-	"DLB_ST_INTERNAL_ERROR",
-	"DLB_ST_DOMAIN_IN_USE",
-	"DLB_ST_IOMMU_MAPPING_ERROR",
-	"DLB_ST_FAIL_TO_PIN_MEMORY_PAGE",
-	"DLB_ST_UNABLE_TO_PIN_POPCOUNT_PAGES",
-	"DLB_ST_UNABLE_TO_PIN_CQ_PAGES",
-	"DLB_ST_DISCONTIGUOUS_CQ_MEMORY",
-	"DLB_ST_DISCONTIGUOUS_POP_COUNT_MEMORY",
-	"DLB_ST_DOMAIN_STARTED",
-	"DLB_ST_LARGE_POOL_NOT_SPECIFIED",
-	"DLB_ST_SMALL_POOL_NOT_SPECIFIED",
-	"DLB_ST_NEITHER_POOL_SPECIFIED",
-	"DLB_ST_DOMAIN_NOT_STARTED",
-	"DLB_ST_INVALID_MEASUREMENT_DURATION",
-	"DLB_ST_INVALID_PERF_METRIC_GROUP_ID",
-	"DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES",
-	"DLB_ST_DOMAIN_RESET_FAILED",
-	"DLB_ST_MBOX_ERROR",
-	"DLB_ST_INVALID_HIST_LIST_DEPTH",
-	"DLB_ST_NO_MEMORY",
-};
-
-struct dlb_cmd_response {
-	__u32 status; /* Interpret using enum dlb_error */
-	__u32 id;
-};
-
-/******************************/
-/* 'dlb' commands	      */
-/******************************/
-
-#define DLB_DEVICE_VERSION(x) (((x) >> 8) & 0xFF)
-#define DLB_DEVICE_REVISION(x) ((x) & 0xFF)
-
-enum dlb_revisions {
-	DLB_REV_A0 = 0,
-	DLB_REV_A1 = 1,
-	DLB_REV_A2 = 2,
-	DLB_REV_A3 = 3,
-	DLB_REV_B0 = 4,
-};
-
-/*
- * DLB_CMD_CREATE_SCHED_DOMAIN: Create a DLB scheduling domain and reserve the
- *	resources (queues, ports, etc.) that it contains.
- *
- * Input parameters:
- * - num_ldb_queues: Number of load-balanced queues.
- * - num_ldb_ports: Number of load-balanced ports.
- * - num_dir_ports: Number of directed ports. A directed port has one directed
- *	queue, so no num_dir_queues argument is necessary.
- * - num_atomic_inflights: This specifies the amount of temporary atomic QE
- *	storage for the domain. This storage is divided among the domain's
- *	load-balanced queues that are configured for atomic scheduling.
- * - num_hist_list_entries: Amount of history list storage. This is divided
- *	among the domain's CQs.
- * - num_ldb_credits: Amount of load-balanced QE storage (QED). QEs occupy this
- *	space until they are scheduled to a load-balanced CQ. One credit
- *	represents the storage for one QE.
- * - num_dir_credits: Amount of directed QE storage (DQED). QEs occupy this
- *	space until they are scheduled to a directed CQ. One credit represents
- *	the storage for one QE.
- * - num_ldb_credit_pools: Number of pools into which the load-balanced credits
- *	are placed.
- * - num_dir_credit_pools: Number of pools into which the directed credits are
- *	placed.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: domain ID.
- */
-struct dlb_create_sched_domain_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 num_ldb_queues;
-	__u32 num_ldb_ports;
-	__u32 num_dir_ports;
-	__u32 num_atomic_inflights;
-	__u32 num_hist_list_entries;
-	__u32 num_ldb_credits;
-	__u32 num_dir_credits;
-	__u32 num_ldb_credit_pools;
-	__u32 num_dir_credit_pools;
-};
-
-/*
- * DLB_CMD_GET_NUM_RESOURCES: Return the number of available resources
- *	(queues, ports, etc.) that this device owns.
- *
- * Output parameters:
- * - num_sched_domains: Number of available scheduling domains.
- * - num_ldb_queues: Number of available load-balanced queues.
- * - num_ldb_ports: Number of available load-balanced ports.
- * - num_dir_ports: Number of available directed ports. There is one directed
- *	queue for every directed port.
- * - num_atomic_inflights: Amount of available temporary atomic QE storage.
- * - max_contiguous_atomic_inflights: When a domain is created, the temporary
- *	atomic QE storage is allocated in a contiguous chunk. This return value
- *	is the longest available contiguous range of atomic QE storage.
- * - num_hist_list_entries: Amount of history list storage.
- * - max_contiguous_hist_list_entries: History list storage is allocated in
- *	a contiguous chunk, and this return value is the longest available
- *	contiguous range of history list entries.
- * - num_ldb_credits: Amount of available load-balanced QE storage.
- * - max_contiguous_ldb_credits: QED storage is allocated in a contiguous
- *	chunk, and this return value is the longest available contiguous range
- *	of load-balanced credit storage.
- * - num_dir_credits: Amount of available directed QE storage.
- * - max_contiguous_dir_credits: DQED storage is allocated in a contiguous
- *	chunk, and this return value is the longest available contiguous range
- *	of directed credit storage.
- * - num_ldb_credit_pools: Number of available load-balanced credit pools.
- * - num_dir_credit_pools: Number of available directed credit pools.
- * - padding0: Reserved for future use.
- */
-struct dlb_get_num_resources_args {
-	/* Output parameters */
-	__u32 num_sched_domains;
-	__u32 num_ldb_queues;
-	__u32 num_ldb_ports;
-	__u32 num_dir_ports;
-	__u32 num_atomic_inflights;
-	__u32 max_contiguous_atomic_inflights;
-	__u32 num_hist_list_entries;
-	__u32 max_contiguous_hist_list_entries;
-	__u32 num_ldb_credits;
-	__u32 max_contiguous_ldb_credits;
-	__u32 num_dir_credits;
-	__u32 max_contiguous_dir_credits;
-	__u32 num_ldb_credit_pools;
-	__u32 num_dir_credit_pools;
-	__u32 padding0;
-};
-
-/*
- * DLB_CMD_SET_SN_ALLOCATION: Configure a sequence number group
- *
- * Input parameters:
- * - group: Sequence number group ID.
- * - num: Number of sequence numbers per queue.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- */
-struct dlb_set_sn_allocation_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 group;
-	__u32 num;
-};
-
-/*
- * DLB_CMD_GET_SN_ALLOCATION: Get a sequence number group's configuration
- *
- * Input parameters:
- * - group: Sequence number group ID.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: Specified group's number of sequence numbers per queue.
- */
-struct dlb_get_sn_allocation_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 group;
-	__u32 padding0;
-};
-
-enum dlb_cq_poll_modes {
-	DLB_CQ_POLL_MODE_STD,
-	DLB_CQ_POLL_MODE_SPARSE,
-
-	/* NUM_DLB_CQ_POLL_MODE must be last */
-	NUM_DLB_CQ_POLL_MODE,
-};
-
-/*
- * DLB_CMD_QUERY_CQ_POLL_MODE: Query the CQ poll mode the kernel driver is using
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: CQ poll mode (see enum dlb_cq_poll_modes).
- */
-struct dlb_query_cq_poll_mode_args {
-	/* Output parameters */
-	__u64 response;
-};
-
-/*
- * DLB_CMD_GET_SN_OCCUPANCY: Get a sequence number group's occupancy
- *
- * Each sequence number group has one or more slots, depending on its
- * configuration. I.e.:
- * - If configured for 1024 sequence numbers per queue, the group has 1 slot
- * - If configured for 512 sequence numbers per queue, the group has 2 slots
- *   ...
- * - If configured for 32 sequence numbers per queue, the group has 32 slots
- *
- * This ioctl returns the group's number of in-use slots. If its occupancy is
- * 0, the group's sequence number allocation can be reconfigured.
- *
- * Input parameters:
- * - group: Sequence number group ID.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: Specified group's number of used slots.
- */
-struct dlb_get_sn_occupancy_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 group;
-	__u32 padding0;
-};
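The slot count described above follows directly from dividing the 1024-entry maximum by the configured sequence numbers per queue. A small illustrative helper (not part of the original header):

/* Number of slots in a sequence number group, following the
 * 1024 -> 1, 512 -> 2, ..., 32 -> 32 progression described above.
 */
static inline unsigned int
dlb_example_sn_group_slots(unsigned int sn_per_queue)
{
	/* valid configurations: 32, 64, 128, 256, 512, 1024 */
	return 1024u / sn_per_queue;
}
/* e.g. dlb_example_sn_group_slots(512) == 2, matching the list above */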
-
-/*********************************/
-/* 'scheduling domain' commands  */
-/*********************************/
-
-/*
- * DLB_DOMAIN_CMD_CREATE_LDB_POOL: Configure a load-balanced credit pool.
- * Input parameters:
- * - num_ldb_credits: Number of load-balanced credits (QED space) for this
- *	pool.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: pool ID.
- */
-struct dlb_create_ldb_pool_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 num_ldb_credits;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_CREATE_DIR_POOL: Configure a directed credit pool.
- * Input parameters:
- * - num_dir_credits: Number of directed credits (DQED space) for this pool.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: Pool ID.
- */
-struct dlb_create_dir_pool_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 num_dir_credits;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_CREATE_LDB_QUEUE: Configure a load-balanced queue.
- * Input parameters:
- * - num_atomic_inflights: This specifies the amount of temporary atomic QE
- *	storage for this queue. If zero, the queue will not support atomic
- *	scheduling.
- * - num_sequence_numbers: This specifies the number of sequence numbers used
- *	by this queue. If zero, the queue will not support ordered scheduling.
- *	If non-zero, the queue will not support unordered scheduling.
- * - num_qid_inflights: The maximum number of QEs that can be inflight
- *	(scheduled to a CQ but not completed) at any time. If
- *	num_sequence_numbers is non-zero, num_qid_inflights must be set equal
- *	to num_sequence_numbers.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: Queue ID.
- */
-struct dlb_create_ldb_queue_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 num_sequence_numbers;
-	__u32 num_qid_inflights;
-	__u32 num_atomic_inflights;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_CREATE_DIR_QUEUE: Configure a directed queue.
- * Input parameters:
- * - port_id: Port ID. If the corresponding directed port is already created,
- *	specify its ID here. Else this argument must be 0xFFFFFFFF to indicate
- *	that the queue is being created before the port.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: Queue ID.
- */
-struct dlb_create_dir_queue_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__s32 port_id;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_CREATE_LDB_PORT: Configure a load-balanced port.
- * Input parameters:
- * - ldb_credit_pool_id: Load-balanced credit pool this port will belong to.
- * - dir_credit_pool_id: Directed credit pool this port will belong to.
- * - ldb_credit_high_watermark: Number of load-balanced credits from the pool
- *	that this port will own.
- *
- *	If this port's scheduling domain does not have any load-balanced queues,
- *	this argument is ignored and the port is given no load-balanced
- *	credits.
- * - dir_credit_high_watermark: Number of directed credits from the pool that
- *	this port will own.
- *
- *	If this port's scheduling domain does not have any directed queues,
- *	this argument is ignored and the port is given no directed credits.
- * - ldb_credit_low_watermark: Load-balanced credit low watermark. When the
- *	port's credits reach this watermark, they become eligible to be
- *	refilled by the DLB as credits until the high watermark
- *	(num_ldb_credits) is reached.
- *
- *	If this port's scheduling domain does not have any load-balanced queues,
- *	this argument is ignored and the port is given no load-balanced
- *	credits.
- * - dir_credit_low_watermark: Directed credit low watermark. When the port's
- *	credits reach this watermark, they become eligible to be refilled by
- *	the DLB as credits until the high watermark (num_dir_credits) is
- *	reached.
- *
- *	If this port's scheduling domain does not have any directed queues,
- *	this argument is ignored and the port is given no directed credits.
- * - ldb_credit_quantum: Number of load-balanced credits for the DLB to refill
- *	per refill operation.
- *
- *	If this port's scheduling domain does not have any load-balanced queues,
- *	this argument is ignored and the port is given no load-balanced
- *	credits.
- * - dir_credit_quantum: Number of directed credits for the DLB to refill per
- *	refill operation.
- *
- *	If this port's scheduling domain does not have any directed queues,
- *	this argument is ignored and the port is given no directed credits.
- * - padding0: Reserved for future use.
- * - cq_depth: Depth of the port's CQ. Must be a power-of-two between 8 and
- *	1024, inclusive.
- * - cq_depth_threshold: CQ depth interrupt threshold. A value of N means that
- *	the CQ interrupt won't fire until there are N or more outstanding CQ
- *	tokens.
- * - cq_history_list_size: Number of history list entries. This must be greater
- *	than or equal to cq_depth.
- * - padding1: Reserved for future use.
- * - padding2: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: port ID.
- */
-struct dlb_create_ldb_port_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 ldb_credit_pool_id;
-	__u32 dir_credit_pool_id;
-	__u16 ldb_credit_high_watermark;
-	__u16 ldb_credit_low_watermark;
-	__u16 ldb_credit_quantum;
-	__u16 dir_credit_high_watermark;
-	__u16 dir_credit_low_watermark;
-	__u16 dir_credit_quantum;
-	__u16 padding0;
-	__u16 cq_depth;
-	__u16 cq_depth_threshold;
-	__u16 cq_history_list_size;
-	__u32 padding1;
-};
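The CQ-related constraints stated above (cq_depth a power of two in [8, 1024], cq_history_list_size at least cq_depth) can be checked up front by the caller. A minimal sketch, assuming a hypothetical validation helper that is not part of the removed driver:

#include <stdbool.h>
#include <stdint.h>

static bool
dlb_example_ldb_port_args_valid(uint16_t cq_depth, uint16_t cq_history_list_size)
{
	/* power-of-two test: exactly one bit set */
	bool pow2 = cq_depth != 0 && (cq_depth & (cq_depth - 1)) == 0;

	return pow2 && cq_depth >= 8 && cq_depth <= 1024 &&
	       cq_history_list_size >= cq_depth;
}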
-
-/*
- * DLB_DOMAIN_CMD_CREATE_DIR_PORT: Configure a directed port.
- * Input parameters:
- * - ldb_credit_pool_id: Load-balanced credit pool this port will belong to.
- * - dir_credit_pool_id: Directed credit pool this port will belong to.
- * - ldb_credit_high_watermark: Number of load-balanced credits from the pool
- *	that this port will own.
- *
- *	If this port's scheduling domain does not have any load-balanced queues,
- *	this argument is ignored and the port is given no load-balanced
- *	credits.
- * - dir_credit_high_watermark: Number of directed credits from the pool that
- *	this port will own.
- * - ldb_credit_low_watermark: Load-balanced credit low watermark. When the
- *	port's credits reach this watermark, they become eligible to be
- *	refilled by the DLB as credits until the high watermark
- *	(num_ldb_credits) is reached.
- *
- *	If this port's scheduling domain does not have any load-balanced queues,
- *	this argument is ignored and the port is given no load-balanced
- *	credits.
- * - dir_credit_low_watermark: Directed credit low watermark. When the port's
- *	credits reach this watermark, they become eligible to be refilled by
- *	the DLB as credits until the high watermark (num_dir_credits) is
- *	reached.
- * - ldb_credit_quantum: Number of load-balanced credits for the DLB to refill
- *	per refill operation.
- *
- *	If this port's scheduling domain does not have any load-balanced queues,
- *	this argument is ignored and the port is given no load-balanced
- *	credits.
- * - dir_credit_quantum: Number of directed credits for the DLB to refill per
- *	refill operation.
- * - cq_depth: Depth of the port's CQ. Must be a power-of-two between 8 and
- *	1024, inclusive.
- * - cq_depth_threshold: CQ depth interrupt threshold. A value of N means that
- *	the CQ interrupt won't fire until there are N or more outstanding CQ
- *	tokens.
- * - qid: Queue ID. If the corresponding directed queue is already created,
- *	specify its ID here. Else this argument must be 0xFFFFFFFF to indicate
- *	that the port is being created before the queue.
- * - padding1: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: Port ID.
- */
-struct dlb_create_dir_port_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 ldb_credit_pool_id;
-	__u32 dir_credit_pool_id;
-	__u16 ldb_credit_high_watermark;
-	__u16 ldb_credit_low_watermark;
-	__u16 ldb_credit_quantum;
-	__u16 dir_credit_high_watermark;
-	__u16 dir_credit_low_watermark;
-	__u16 dir_credit_quantum;
-	__u16 cq_depth;
-	__u16 cq_depth_threshold;
-	__s32 queue_id;
-	__u32 padding1;
-};
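Per the qid description above, a directed port may be created before its paired queue by passing the 0xFFFFFFFF sentinel, which is -1 for the signed queue_id field. A hypothetical sketch (field values are examples only; the caller is assumed to have zeroed the struct and set the credit fields already):

static void
dlb_example_dir_port_before_queue(struct dlb_create_dir_port_args *a)
{
	a->queue_id = -1;          /* queue will be created afterwards */
	a->cq_depth = 8;           /* power of two in [8, 1024] */
	a->cq_depth_threshold = 1; /* interrupt only after >= 1 outstanding CQ token */
}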
-
-/*
- * DLB_DOMAIN_CMD_START_DOMAIN: Mark the end of the domain configuration. This
- *	must be called before passing QEs into the device, and no configuration
- *	ioctls can be issued once the domain has started. Sending QEs into the
- *	device before calling this ioctl will result in undefined behavior.
- * Input parameters:
- * - (None)
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- */
-struct dlb_start_domain_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-};
-
-/*
- * DLB_DOMAIN_CMD_MAP_QID: Map a load-balanced queue to a load-balanced port.
- * Input parameters:
- * - port_id: Load-balanced port ID.
- * - qid: Load-balanced queue ID.
- * - priority: Queue->port service priority.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- */
-struct dlb_map_qid_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 port_id;
-	__u32 qid;
-	__u32 priority;
-	__u32 padding0;
-};
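A hypothetical sketch of filling the map arguments: linking load-balanced queue 3 to port 1. The values are examples only, and how the populated struct is handed to the driver is outside this header.

static void
dlb_example_map_qid(struct dlb_map_qid_args *args, uint64_t resp_ptr)
{
	args->response = resp_ptr;  /* user pointer to a dlb_cmd_response */
	args->port_id = 1;
	args->qid = 3;
	args->priority = 0;         /* example queue->port service priority */
	args->padding0 = 0;
}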
-
-/*
- * DLB_DOMAIN_CMD_UNMAP_QID: Unmap a load-balanced queue from a load-balanced
- *	port.
- * Input parameters:
- * - port_id: Load-balanced port ID.
- * - qid: Load-balanced queue ID.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- */
-struct dlb_unmap_qid_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 port_id;
-	__u32 qid;
-};
-
-/*
- * DLB_DOMAIN_CMD_ENABLE_LDB_PORT: Enable scheduling to a load-balanced port.
- * Input parameters:
- * - port_id: Load-balanced port ID.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- */
-struct dlb_enable_ldb_port_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 port_id;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_ENABLE_DIR_PORT: Enable scheduling to a directed port.
- * Input parameters:
- * - port_id: Directed port ID.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- */
-struct dlb_enable_dir_port_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 port_id;
-};
-
-/*
- * DLB_DOMAIN_CMD_DISABLE_LDB_PORT: Disable scheduling to a load-balanced port.
- * Input parameters:
- * - port_id: Load-balanced port ID.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- */
-struct dlb_disable_ldb_port_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 port_id;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_DISABLE_DIR_PORT: Disable scheduling to a directed port.
- * Input parameters:
- * - port_id: Directed port ID.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- */
-struct dlb_disable_dir_port_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 port_id;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_GET_LDB_QUEUE_DEPTH: Get a load-balanced queue's depth.
- * Input parameters:
- * - queue_id: The load-balanced queue ID.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: queue depth.
- */
-struct dlb_get_ldb_queue_depth_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 queue_id;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_GET_DIR_QUEUE_DEPTH: Get a directed queue's depth.
- * Input parameters:
- * - queue_id: The directed queue ID.
- * - padding0: Reserved for future use.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: queue depth.
- */
-struct dlb_get_dir_queue_depth_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 queue_id;
-	__u32 padding0;
-};
-
-/*
- * DLB_DOMAIN_CMD_PENDING_PORT_UNMAPS: Get number of queue unmap operations in
- *	progress for a load-balanced port.
- *
- *	Note: This is a snapshot; the number of unmap operations in progress
- *	is subject to change at any time.
- *
- * Input parameters:
- * - port_id: Load-balanced port ID.
- *
- * Output parameters:
- * - response: pointer to a struct dlb_cmd_response.
- *	response.status: Detailed error code. In certain cases, such as if the
- *		response pointer is invalid, the driver won't set status.
- *	response.id: number of unmaps in progress.
- */
-struct dlb_pending_port_unmaps_args {
-	/* Output parameters */
-	__u64 response;
-	/* Input parameters */
-	__u32 port_id;
-	__u32 padding0;
-};
-
-/*
- * Base addresses for memory mapping the consumer queue (CQ) and popcount (PC)
- * memory space, and producer port (PP) MMIO space. The CQ, PC, and PP
- * addresses are per-port. Every address is page-separated (e.g. LDB PP 0 is at
- * 0x2100000 and LDB PP 1 is at 0x2101000).
- */
-#define DLB_LDB_CQ_BASE 0x3000000
-#define DLB_LDB_CQ_MAX_SIZE 65536
-#define DLB_LDB_CQ_OFFS(id) (DLB_LDB_CQ_BASE + (id) * DLB_LDB_CQ_MAX_SIZE)
-
-#define DLB_DIR_CQ_BASE 0x3800000
-#define DLB_DIR_CQ_MAX_SIZE 65536
-#define DLB_DIR_CQ_OFFS(id) (DLB_DIR_CQ_BASE + (id) * DLB_DIR_CQ_MAX_SIZE)
-
-#define DLB_LDB_PC_BASE 0x2300000
-#define DLB_LDB_PC_MAX_SIZE 4096
-#define DLB_LDB_PC_OFFS(id) (DLB_LDB_PC_BASE + (id) * DLB_LDB_PC_MAX_SIZE)
-
-#define DLB_DIR_PC_BASE 0x2200000
-#define DLB_DIR_PC_MAX_SIZE 4096
-#define DLB_DIR_PC_OFFS(id) (DLB_DIR_PC_BASE + (id) * DLB_DIR_PC_MAX_SIZE)
-
-#define DLB_LDB_PP_BASE 0x2100000
-#define DLB_LDB_PP_MAX_SIZE 4096
-#define DLB_LDB_PP_OFFS(id) (DLB_LDB_PP_BASE + (id) * DLB_LDB_PP_MAX_SIZE)
-
-#define DLB_DIR_PP_BASE 0x2000000
-#define DLB_DIR_PP_MAX_SIZE 4096
-#define DLB_DIR_PP_OFFS(id) (DLB_DIR_PP_BASE + (id) * DLB_DIR_PP_MAX_SIZE)
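The offset macros reproduce the layout described in the comment above: PP and PC regions are spaced one 4 KiB page apart, while CQ regions are spaced 64 KiB apart. An illustrative check (assumed helper, not driver code):

#include <assert.h>

static void dlb_example_mmio_offsets(void)
{
	assert(DLB_LDB_PP_OFFS(0) == 0x2100000);
	assert(DLB_LDB_PP_OFFS(1) == 0x2101000); /* next 4 KiB page */
	assert(DLB_LDB_PC_OFFS(3) == 0x2303000);
	assert(DLB_DIR_CQ_OFFS(2) == 0x3820000); /* 64 KiB stride */
}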
-
-#endif /* __DLB_USER_H */
diff --git a/drivers/event/dlb/dlb_xstats.c b/drivers/event/dlb/dlb_xstats.c
deleted file mode 100644
index 5f4c59030..000000000
--- a/drivers/event/dlb/dlb_xstats.c
+++ /dev/null
@@ -1,1212 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <stdint.h>
-#include <inttypes.h>
-
-#include "dlb_priv.h"
-#include "dlb_inline_fns.h"
-
-enum dlb_xstats_type {
-	/* common to device and port */
-	rx_ok,				/**< Receive an event */
-	rx_drop,                        /**< Error bit set in received QE */
-	rx_interrupt_wait,		/**< Wait on an interrupt */
-	rx_umonitor_umwait,		/**< Block using umwait */
-	tx_ok,				/**< Transmit an event */
-	total_polls,			/**< Call dequeue_burst */
-	zero_polls,			/**< Call dequeue burst and return 0 */
-	tx_nospc_ldb_hw_credits,	/**< Insufficient LDB h/w credits */
-	tx_nospc_dir_hw_credits,	/**< Insufficient DIR h/w credits */
-	tx_nospc_inflight_max,		/**< Reach the new_event_threshold */
-	tx_nospc_new_event_limit,	/**< Insufficient s/w credits */
-	tx_nospc_inflight_credits,	/**< Port has too few s/w credits */
-	/* device specific */
-	nb_events_limit,		/**< Maximum num of events */
-	inflight_events,		/**< Current num events outstanding */
-	ldb_pool_size,			/**< Num load balanced credits */
-	dir_pool_size,			/**< Num directed credits */
-	/* port specific */
-	tx_new,				/**< Send an OP_NEW event */
-	tx_fwd,				/**< Send an OP_FORWARD event */
-	tx_rel,				/**< Send an OP_RELEASE event */
-	tx_implicit_rel,		/**< Issue an implicit event release */
-	tx_sched_ordered,		/**< Send a SCHED_TYPE_ORDERED event */
-	tx_sched_unordered,		/**< Send a SCHED_TYPE_PARALLEL event */
-	tx_sched_atomic,		/**< Send a SCHED_TYPE_ATOMIC event */
-	tx_sched_directed,		/**< Send a directed event */
-	tx_invalid,                     /**< Send an event with an invalid op */
-	outstanding_releases,		/**< # of releases a port owes */
-	max_outstanding_releases,	/**< max # of releases a port can owe */
-	rx_sched_ordered,		/**< Dequeue an ordered event */
-	rx_sched_unordered,		/**< Dequeue an unordered event */
-	rx_sched_atomic,		/**< Dequeue an atomic event */
-	rx_sched_directed,		/**< Dequeue a directed event */
-	rx_sched_invalid,               /**< Dequeue event sched type invalid */
-	/* common to port and queue */
-	is_configured,			/**< Port is configured */
-	is_load_balanced,		/**< Port is LDB */
-	hw_id,				/**< Hardware ID */
-	/* queue specific */
-	num_links,			/**< Number of ports linked */
-	sched_type,			/**< Queue sched type */
-	enq_ok,				/**< # events enqueued to the queue */
-	current_depth			/**< Current queue depth */
-};
-
-typedef uint64_t (*dlb_xstats_fn)(struct dlb_eventdev *dlb,
-		uint16_t obj_idx, /* port or queue id */
-		enum dlb_xstats_type stat, int extra_arg);
-
-enum dlb_xstats_fn_type {
-	DLB_XSTATS_FN_DEV,
-	DLB_XSTATS_FN_PORT,
-	DLB_XSTATS_FN_QUEUE
-};
-
-struct dlb_xstats_entry {
-	struct rte_event_dev_xstats_name name;
-	uint64_t reset_value; /* an offset to be taken away to emulate resets */
-	enum dlb_xstats_fn_type fn_id;
-	enum dlb_xstats_type stat;
-	enum rte_event_dev_xstats_mode mode;
-	int extra_arg;
-	uint16_t obj_idx;
-	uint8_t reset_allowed; /* when set, this value can be reset */
-};
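The reset_value field emulates counter resets by recording an offset rather than clearing hardware or software counters. A minimal sketch of that pattern with assumed names: a "reset" snapshots the current raw value, and later reads report raw minus the snapshot.

#include <stdint.h>

struct example_counter {
	uint64_t raw;          /* monotonically increasing raw count */
	uint64_t reset_value;  /* snapshot taken at the last reset */
};

static uint64_t example_counter_read(const struct example_counter *c)
{
	return c->raw - c->reset_value;
}

static void example_counter_reset(struct example_counter *c)
{
	c->reset_value = c->raw;
}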
-
-/* Some device stats are simply a summation of the corresponding port values */
-static uint64_t
-dlb_device_traffic_stat_get(struct dlb_eventdev *dlb, int which_stat)
-{
-	int i;
-	uint64_t val = 0;
-
-	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
-		struct dlb_eventdev_port *port = &dlb->ev_ports[i];
-
-		if (!port->setup_done)
-			continue;
-
-		switch (which_stat) {
-		case rx_ok:
-			val += port->stats.traffic.rx_ok;
-			break;
-		case rx_drop:
-			val += port->stats.traffic.rx_drop;
-			break;
-		case rx_interrupt_wait:
-			val += port->stats.traffic.rx_interrupt_wait;
-			break;
-		case rx_umonitor_umwait:
-			val += port->stats.traffic.rx_umonitor_umwait;
-			break;
-		case tx_ok:
-			val += port->stats.traffic.tx_ok;
-			break;
-		case total_polls:
-			val += port->stats.traffic.total_polls;
-			break;
-		case zero_polls:
-			val += port->stats.traffic.zero_polls;
-			break;
-		case tx_nospc_ldb_hw_credits:
-			val += port->stats.traffic.tx_nospc_ldb_hw_credits;
-			break;
-		case tx_nospc_dir_hw_credits:
-			val += port->stats.traffic.tx_nospc_dir_hw_credits;
-			break;
-		case tx_nospc_inflight_max:
-			val += port->stats.traffic.tx_nospc_inflight_max;
-			break;
-		case tx_nospc_new_event_limit:
-			val += port->stats.traffic.tx_nospc_new_event_limit;
-			break;
-		case tx_nospc_inflight_credits:
-			val += port->stats.traffic.tx_nospc_inflight_credits;
-			break;
-		default:
-			return -1;
-		}
-	}
-	return val;
-}
-
-static uint64_t
-get_dev_stat(struct dlb_eventdev *dlb, uint16_t obj_idx __rte_unused,
-	     enum dlb_xstats_type type, int extra_arg __rte_unused)
-{
-	switch (type) {
-	case rx_ok:
-	case rx_drop:
-	case rx_interrupt_wait:
-	case rx_umonitor_umwait:
-	case tx_ok:
-	case total_polls:
-	case zero_polls:
-	case tx_nospc_ldb_hw_credits:
-	case tx_nospc_dir_hw_credits:
-	case tx_nospc_inflight_max:
-	case tx_nospc_new_event_limit:
-	case tx_nospc_inflight_credits:
-		return dlb_device_traffic_stat_get(dlb, type);
-	case nb_events_limit:
-		return dlb->new_event_limit;
-	case inflight_events:
-		return __atomic_load_n(&dlb->inflights, __ATOMIC_SEQ_CST);
-	case ldb_pool_size:
-		return dlb->num_ldb_credits;
-	case dir_pool_size:
-		return dlb->num_dir_credits;
-	default: return -1;
-	}
-}
-
-static uint64_t
-get_port_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
-	      enum dlb_xstats_type type, int extra_arg __rte_unused)
-{
-	struct dlb_eventdev_port *ev_port = &dlb->ev_ports[obj_idx];
-
-	switch (type) {
-	case rx_ok: return ev_port->stats.traffic.rx_ok;
-
-	case rx_drop: return ev_port->stats.traffic.rx_drop;
-
-	case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;
-
-	case rx_umonitor_umwait:
-		return ev_port->stats.traffic.rx_umonitor_umwait;
-
-	case tx_ok: return ev_port->stats.traffic.tx_ok;
-
-	case total_polls: return ev_port->stats.traffic.total_polls;
-
-	case zero_polls: return ev_port->stats.traffic.zero_polls;
-
-	case tx_nospc_ldb_hw_credits:
-		return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;
-
-	case tx_nospc_dir_hw_credits:
-		return ev_port->stats.traffic.tx_nospc_dir_hw_credits;
-
-	case tx_nospc_inflight_max:
-		return ev_port->stats.traffic.tx_nospc_inflight_max;
-
-	case tx_nospc_new_event_limit:
-		return ev_port->stats.traffic.tx_nospc_new_event_limit;
-
-	case tx_nospc_inflight_credits:
-		return ev_port->stats.traffic.tx_nospc_inflight_credits;
-
-	case is_configured: return ev_port->setup_done;
-
-	case is_load_balanced: return !ev_port->qm_port.is_directed;
-
-	case hw_id: return ev_port->qm_port.id;
-
-	case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];
-
-	case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];
-
-	case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];
-
-	case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;
-
-	case tx_sched_ordered:
-		return ev_port->stats.tx_sched_cnt[DLB_SCHED_ORDERED];
-
-	case tx_sched_unordered:
-		return ev_port->stats.tx_sched_cnt[DLB_SCHED_UNORDERED];
-
-	case tx_sched_atomic:
-		return ev_port->stats.tx_sched_cnt[DLB_SCHED_ATOMIC];
-
-	case tx_sched_directed:
-		return ev_port->stats.tx_sched_cnt[DLB_SCHED_DIRECTED];
-
-	case tx_invalid: return ev_port->stats.tx_invalid;
-
-	case outstanding_releases: return ev_port->outstanding_releases;
-
-	case max_outstanding_releases:
-		return DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
-
-	case rx_sched_ordered:
-		return ev_port->stats.rx_sched_cnt[DLB_SCHED_ORDERED];
-
-	case rx_sched_unordered:
-		return ev_port->stats.rx_sched_cnt[DLB_SCHED_UNORDERED];
-
-	case rx_sched_atomic:
-		return ev_port->stats.rx_sched_cnt[DLB_SCHED_ATOMIC];
-
-	case rx_sched_directed:
-		return ev_port->stats.rx_sched_cnt[DLB_SCHED_DIRECTED];
-
-	case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;
-
-	default: return -1;
-	}
-}
-
-static uint64_t
-get_queue_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
-	       enum dlb_xstats_type type, int extra_arg __rte_unused)
-{
-	struct dlb_eventdev_queue *ev_queue = &dlb->ev_queues[obj_idx];
-
-	switch (type) {
-	case is_configured: return ev_queue->setup_done;
-
-	case is_load_balanced: return !ev_queue->qm_queue.is_directed;
-
-	case hw_id: return ev_queue->qm_queue.id;
-
-	case num_links: return ev_queue->num_links;
-
-	case sched_type: return ev_queue->qm_queue.sched_type;
-
-	case enq_ok:
-	{
-		int port_count = 0;
-		uint64_t enq_ok_tally = 0;
-
-		ev_queue->enq_ok = 0;
-		for (port_count = 0; port_count < DLB_MAX_NUM_PORTS;
-		     port_count++) {
-			struct dlb_eventdev_port *ev_port =
-				&dlb->ev_ports[port_count];
-			enq_ok_tally += ev_port->stats.enq_ok[ev_queue->id];
-		}
-		ev_queue->enq_ok = enq_ok_tally;
-		return ev_queue->enq_ok;
-	}
-
-	case current_depth: return dlb_get_queue_depth(dlb, ev_queue);
-
-	default: return -1;
-	}
-}
-
-int
-dlb_xstats_init(struct dlb_eventdev *dlb)
-{
-	/*
-	 * Define the stats names and types used to build up the device
-	 * xstats array.
-	 * There are multiple sets of stats:
-	 *   - device-level,
-	 *   - per-port,
-	 *   - per-qid.
-	 *
-	 * For each of these sets there are three parallel arrays: one for the
-	 * names, one for the stat type parameter passed to the accessor
-	 * function, and one indicating whether the stat may be reset.
-	 * All these arrays must be kept in sync.
-	 */

-	static const char * const dev_stats[] = {
-		"rx_ok",
-		"rx_drop",
-		"rx_interrupt_wait",
-		"rx_umonitor_umwait",
-		"tx_ok",
-		"total_polls",
-		"zero_polls",
-		"tx_nospc_ldb_hw_credits",
-		"tx_nospc_dir_hw_credits",
-		"tx_nospc_inflight_max",
-		"tx_nospc_new_event_limit",
-		"tx_nospc_inflight_credits",
-		"nb_events_limit",
-		"inflight_events",
-		"ldb_pool_size",
-		"dir_pool_size",
-	};
-	static const enum dlb_xstats_type dev_types[] = {
-		rx_ok,
-		rx_drop,
-		rx_interrupt_wait,
-		rx_umonitor_umwait,
-		tx_ok,
-		total_polls,
-		zero_polls,
-		tx_nospc_ldb_hw_credits,
-		tx_nospc_dir_hw_credits,
-		tx_nospc_inflight_max,
-		tx_nospc_new_event_limit,
-		tx_nospc_inflight_credits,
-		nb_events_limit,
-		inflight_events,
-		ldb_pool_size,
-		dir_pool_size,
-	};
-	/* Note: generated device stats are not allowed to be reset. */
-	static const uint8_t dev_reset_allowed[] = {
-		0, /* rx_ok */
-		0, /* rx_drop */
-		0, /* rx_interrupt_wait */
-		0, /* rx_umonitor_umwait */
-		0, /* tx_ok */
-		0, /* total_polls */
-		0, /* zero_polls */
-		0, /* tx_nospc_ldb_hw_credits */
-		0, /* tx_nospc_dir_hw_credits */
-		0, /* tx_nospc_inflight_max */
-		0, /* tx_nospc_new_event_limit */
-		0, /* tx_nospc_inflight_credits */
-		0, /* nb_events_limit */
-		0, /* inflight_events */
-		0, /* ldb_pool_size */
-		0, /* dir_pool_size */
-	};
-	static const char * const port_stats[] = {
-		"is_configured",
-		"is_load_balanced",
-		"hw_id",
-		"rx_ok",
-		"rx_drop",
-		"rx_interrupt_wait",
-		"rx_umonitor_umwait",
-		"tx_ok",
-		"total_polls",
-		"zero_polls",
-		"tx_nospc_ldb_hw_credits",
-		"tx_nospc_dir_hw_credits",
-		"tx_nospc_inflight_max",
-		"tx_nospc_new_event_limit",
-		"tx_nospc_inflight_credits",
-		"tx_new",
-		"tx_fwd",
-		"tx_rel",
-		"tx_implicit_rel",
-		"tx_sched_ordered",
-		"tx_sched_unordered",
-		"tx_sched_atomic",
-		"tx_sched_directed",
-		"tx_invalid",
-		"outstanding_releases",
-		"max_outstanding_releases",
-		"rx_sched_ordered",
-		"rx_sched_unordered",
-		"rx_sched_atomic",
-		"rx_sched_directed",
-		"rx_sched_invalid"
-	};
-	static const enum dlb_xstats_type port_types[] = {
-		is_configured,
-		is_load_balanced,
-		hw_id,
-		rx_ok,
-		rx_drop,
-		rx_interrupt_wait,
-		rx_umonitor_umwait,
-		tx_ok,
-		total_polls,
-		zero_polls,
-		tx_nospc_ldb_hw_credits,
-		tx_nospc_dir_hw_credits,
-		tx_nospc_inflight_max,
-		tx_nospc_new_event_limit,
-		tx_nospc_inflight_credits,
-		tx_new,
-		tx_fwd,
-		tx_rel,
-		tx_implicit_rel,
-		tx_sched_ordered,
-		tx_sched_unordered,
-		tx_sched_atomic,
-		tx_sched_directed,
-		tx_invalid,
-		outstanding_releases,
-		max_outstanding_releases,
-		rx_sched_ordered,
-		rx_sched_unordered,
-		rx_sched_atomic,
-		rx_sched_directed,
-		rx_sched_invalid
-	};
-	static const uint8_t port_reset_allowed[] = {
-		0, /* is_configured */
-		0, /* is_load_balanced */
-		0, /* hw_id */
-		1, /* rx_ok */
-		1, /* rx_drop */
-		1, /* rx_interrupt_wait */
-		1, /* rx_umonitor_umwait */
-		1, /* tx_ok */
-		1, /* total_polls */
-		1, /* zero_polls */
-		1, /* tx_nospc_ldb_hw_credits */
-		1, /* tx_nospc_dir_hw_credits */
-		1, /* tx_nospc_inflight_max */
-		1, /* tx_nospc_new_event_limit */
-		1, /* tx_nospc_inflight_credits */
-		1, /* tx_new */
-		1, /* tx_fwd */
-		1, /* tx_rel */
-		1, /* tx_implicit_rel */
-		1, /* tx_sched_ordered */
-		1, /* tx_sched_unordered */
-		1, /* tx_sched_atomic */
-		1, /* tx_sched_directed */
-		1, /* tx_invalid */
-		0, /* outstanding_releases */
-		0, /* max_outstanding_releases */
-		1, /* rx_sched_ordered */
-		1, /* rx_sched_unordered */
-		1, /* rx_sched_atomic */
-		1, /* rx_sched_directed */
-		1  /* rx_sched_invalid */
-	};
-
-	/* QID specific stats */
-	static const char * const qid_stats[] = {
-		"is_configured",
-		"is_load_balanced",
-		"hw_id",
-		"num_links",
-		"sched_type",
-		"enq_ok",
-		"current_depth",
-	};
-	static const enum dlb_xstats_type qid_types[] = {
-		is_configured,
-		is_load_balanced,
-		hw_id,
-		num_links,
-		sched_type,
-		enq_ok,
-		current_depth,
-	};
-	static const uint8_t qid_reset_allowed[] = {
-		0, /* is_configured */
-		0, /* is_load_balanced */
-		0, /* hw_id */
-		0, /* num_links */
-		0, /* sched_type */
-		1, /* enq_ok */
-		0, /* current_depth */
-	};
-
-	/* ---- end of stat definitions ---- */
-
-	/* check sizes, since a missed comma can lead to strings being
-	 * joined by the compiler.
-	 */
-	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
-	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
-	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
-
-	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
-	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
-	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
-
-	/* other vars */
-	const unsigned int count = RTE_DIM(dev_stats) +
-			DLB_MAX_NUM_PORTS * RTE_DIM(port_stats) +
-			DLB_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
-	unsigned int i, port, qid, stat_id = 0;
-
-	dlb->xstats = rte_zmalloc_socket(NULL,
-					 sizeof(dlb->xstats[0]) * count, 0,
-					 dlb->qm_instance.info.socket_id);
-	if (dlb->xstats == NULL)
-		return -ENOMEM;
-
-#define sname dlb->xstats[stat_id].name.name
-	for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
-		dlb->xstats[stat_id] = (struct dlb_xstats_entry) {
-			.fn_id = DLB_XSTATS_FN_DEV,
-			.stat = dev_types[i],
-			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
-			.reset_allowed = dev_reset_allowed[i],
-		};
-		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
-	}
-	dlb->xstats_count_mode_dev = stat_id;
-
-	for (port = 0; port < DLB_MAX_NUM_PORTS; port++) {
-		uint32_t count_offset = stat_id;
-
-		dlb->xstats_offset_for_port[port] = stat_id;
-
-		for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
-			dlb->xstats[stat_id] = (struct dlb_xstats_entry){
-				.fn_id = DLB_XSTATS_FN_PORT,
-				.obj_idx = port,
-				.stat = port_types[i],
-				.mode = RTE_EVENT_DEV_XSTATS_PORT,
-				.reset_allowed = port_reset_allowed[i],
-			};
-			snprintf(sname, sizeof(sname), "port_%u_%s",
-				 port, port_stats[i]);
-		}
-
-		dlb->xstats_count_per_port[port] = stat_id - count_offset;
-	}
-
-	dlb->xstats_count_mode_port = stat_id - dlb->xstats_count_mode_dev;
-
-	for (qid = 0; qid < DLB_MAX_NUM_QUEUES; qid++) {
-		uint32_t count_offset = stat_id;
-
-		dlb->xstats_offset_for_qid[qid] = stat_id;
-
-		for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
-			dlb->xstats[stat_id] = (struct dlb_xstats_entry){
-				.fn_id = DLB_XSTATS_FN_QUEUE,
-				.obj_idx = qid,
-				.stat = qid_types[i],
-				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
-				.reset_allowed = qid_reset_allowed[i],
-			};
-			snprintf(sname, sizeof(sname), "qid_%u_%s",
-				 qid, qid_stats[i]);
-		}
-
-		dlb->xstats_count_per_qid[qid] = stat_id - count_offset;
-	}
-
-	dlb->xstats_count_mode_queue = stat_id -
-		(dlb->xstats_count_mode_dev + dlb->xstats_count_mode_port);
-#undef sname
-
-	dlb->xstats_count = stat_id;
-
-	return 0;
-}
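The RTE_BUILD_BUG_ON checks above exist because a missed comma makes the compiler concatenate adjacent string literals, so a names array silently loses an element and drifts out of sync with its parallel type array. An illustrative (deliberately broken) example, not from the driver:

static const char * const broken_names[] = {
	"rx_ok"            /* <-- missing comma */
	"rx_drop",
	"tx_ok",
};
/* sizeof(broken_names)/sizeof(broken_names[0]) == 2, not 3:
 * the first element is the single string "rx_okrx_drop".
 */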
-
-void
-dlb_xstats_uninit(struct dlb_eventdev *dlb)
-{
-	rte_free(dlb->xstats);
-	dlb->xstats_count = 0;
-}
-
-int
-dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
-		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
-		struct rte_event_dev_xstats_name *xstats_names,
-		unsigned int *ids, unsigned int size)
-{
-	const struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	unsigned int i;
-	unsigned int xidx = 0;
-	uint32_t xstats_mode_count = 0;
-	uint32_t start_offset = 0;
-
-	switch (mode) {
-	case RTE_EVENT_DEV_XSTATS_DEVICE:
-		xstats_mode_count = dlb->xstats_count_mode_dev;
-		break;
-	case RTE_EVENT_DEV_XSTATS_PORT:
-		if (queue_port_id >= DLB_MAX_NUM_PORTS)
-			break;
-		xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
-		start_offset = dlb->xstats_offset_for_port[queue_port_id];
-		break;
-	case RTE_EVENT_DEV_XSTATS_QUEUE:
-#if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
-		if (queue_port_id >= DLB_MAX_NUM_QUEUES)
-			break;
-#endif
-		xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
-		start_offset = dlb->xstats_offset_for_qid[queue_port_id];
-		break;
-	default:
-		return -EINVAL;
-	};
-
-	if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
-		return xstats_mode_count;
-
-	for (i = 0; i < dlb->xstats_count && xidx < size; i++) {
-		if (dlb->xstats[i].mode != mode)
-			continue;
-
-		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
-		    queue_port_id != dlb->xstats[i].obj_idx)
-			continue;
-
-		xstats_names[xidx] = dlb->xstats[i].name;
-		if (ids)
-			ids[xidx] = start_offset + xidx;
-		xidx++;
-	}
-	return xidx;
-}
-
-static int
-dlb_xstats_update(struct dlb_eventdev *dlb,
-		enum rte_event_dev_xstats_mode mode,
-		uint8_t queue_port_id, const unsigned int ids[],
-		uint64_t values[], unsigned int n, const uint32_t reset)
-{
-	unsigned int i;
-	unsigned int xidx = 0;
-	uint32_t xstats_mode_count = 0;
-
-	switch (mode) {
-	case RTE_EVENT_DEV_XSTATS_DEVICE:
-		xstats_mode_count = dlb->xstats_count_mode_dev;
-		break;
-	case RTE_EVENT_DEV_XSTATS_PORT:
-		if (queue_port_id >= DLB_MAX_NUM_PORTS)
-			goto invalid_value;
-		xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
-		break;
-	case RTE_EVENT_DEV_XSTATS_QUEUE:
-#if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
-		if (queue_port_id >= DLB_MAX_NUM_QUEUES)
-			goto invalid_value;
-#endif
-		xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
-		break;
-	default:
-		goto invalid_value;
-	};
-
-	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
-		struct dlb_xstats_entry *xs = &dlb->xstats[ids[i]];
-		dlb_xstats_fn fn;
-
-		if (ids[i] > dlb->xstats_count || xs->mode != mode)
-			continue;
-
-		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
-		    queue_port_id != xs->obj_idx)
-			continue;
-
-		switch (xs->fn_id) {
-		case DLB_XSTATS_FN_DEV:
-			fn = get_dev_stat;
-			break;
-		case DLB_XSTATS_FN_PORT:
-			fn = get_port_stat;
-			break;
-		case DLB_XSTATS_FN_QUEUE:
-			fn = get_queue_stat;
-			break;
-		default:
-			DLB_LOG_ERR("Unexpected xstat fn_id %d\n",
-				     xs->fn_id);
-			return -EINVAL;
-		}
-
-		uint64_t val = fn(dlb, xs->obj_idx, xs->stat,
-				  xs->extra_arg) - xs->reset_value;
-
-		if (values)
-			values[xidx] = val;
-
-		if (xs->reset_allowed && reset)
-			xs->reset_value += val;
-
-		xidx++;
-	}
-
-	return xidx;
-
-invalid_value:
-	return -EINVAL;
-}
-
-int
-dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
-		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
-		const unsigned int ids[], uint64_t values[], unsigned int n)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	const uint32_t reset = 0;
-
-	return dlb_xstats_update(dlb, mode, queue_port_id, ids, values, n,
-				  reset);
-}
-
-uint64_t
-dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
-				const char *name, unsigned int *id)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	unsigned int i;
-	dlb_xstats_fn fn;
-
-	for (i = 0; i < dlb->xstats_count; i++) {
-		struct dlb_xstats_entry *xs = &dlb->xstats[i];
-
-		if (strncmp(xs->name.name, name,
-			    RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
-			if (id != NULL)
-				*id = i;
-
-			switch (xs->fn_id) {
-			case DLB_XSTATS_FN_DEV:
-				fn = get_dev_stat;
-				break;
-			case DLB_XSTATS_FN_PORT:
-				fn = get_port_stat;
-				break;
-			case DLB_XSTATS_FN_QUEUE:
-				fn = get_queue_stat;
-				break;
-			default:
-				DLB_LOG_ERR("Unexpected xstat fn_id %d\n",
-					    xs->fn_id);
-				return (uint64_t)-1;
-			}
-
-			return fn(dlb, xs->obj_idx, xs->stat,
-				  xs->extra_arg) - xs->reset_value;
-		}
-	}
-	if (id != NULL)
-		*id = (uint32_t)-1;
-	return (uint64_t)-1;
-}
-
-static void
-dlb_xstats_reset_range(struct dlb_eventdev *dlb, uint32_t start,
-		       uint32_t num)
-{
-	uint32_t i;
-	dlb_xstats_fn fn;
-
-	for (i = start; i < start + num; i++) {
-		struct dlb_xstats_entry *xs = &dlb->xstats[i];
-
-		if (!xs->reset_allowed)
-			continue;
-
-		switch (xs->fn_id) {
-		case DLB_XSTATS_FN_DEV:
-			fn = get_dev_stat;
-			break;
-		case DLB_XSTATS_FN_PORT:
-			fn = get_port_stat;
-			break;
-		case DLB_XSTATS_FN_QUEUE:
-			fn = get_queue_stat;
-			break;
-		default:
-			DLB_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
-			return;
-		}
-
-		uint64_t val = fn(dlb, xs->obj_idx, xs->stat, xs->extra_arg);
-		xs->reset_value = val;
-	}
-}
-
-static int
-dlb_xstats_reset_queue(struct dlb_eventdev *dlb, uint8_t queue_id,
-		       const uint32_t ids[], uint32_t nb_ids)
-{
-	const uint32_t reset = 1;
-
-	if (ids) {
-		uint32_t nb_reset = dlb_xstats_update(dlb,
-					RTE_EVENT_DEV_XSTATS_QUEUE,
-					queue_id, ids, NULL, nb_ids,
-					reset);
-		return nb_reset == nb_ids ? 0 : -EINVAL;
-	}
-
-	if (ids == NULL)
-		dlb_xstats_reset_range(dlb,
-				       dlb->xstats_offset_for_qid[queue_id],
-				       dlb->xstats_count_per_qid[queue_id]);
-
-	return 0;
-}
-
-static int
-dlb_xstats_reset_port(struct dlb_eventdev *dlb, uint8_t port_id,
-		      const uint32_t ids[], uint32_t nb_ids)
-{
-	const uint32_t reset = 1;
-	int offset = dlb->xstats_offset_for_port[port_id];
-	int nb_stat = dlb->xstats_count_per_port[port_id];
-
-	if (ids) {
-		uint32_t nb_reset = dlb_xstats_update(dlb,
-					RTE_EVENT_DEV_XSTATS_PORT, port_id,
-					ids, NULL, nb_ids,
-					reset);
-		return nb_reset == nb_ids ? 0 : -EINVAL;
-	}
-
-	dlb_xstats_reset_range(dlb, offset, nb_stat);
-	return 0;
-}
-
-static int
-dlb_xstats_reset_dev(struct dlb_eventdev *dlb, const uint32_t ids[],
-		     uint32_t nb_ids)
-{
-	uint32_t i;
-
-	if (ids) {
-		for (i = 0; i < nb_ids; i++) {
-			uint32_t id = ids[i];
-
-			if (id >= dlb->xstats_count_mode_dev)
-				return -EINVAL;
-			dlb_xstats_reset_range(dlb, id, 1);
-		}
-	} else {
-		for (i = 0; i < dlb->xstats_count_mode_dev; i++)
-			dlb_xstats_reset_range(dlb, i, 1);
-	}
-
-	return 0;
-}
-
-int
-dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
-			  enum rte_event_dev_xstats_mode mode,
-			  int16_t queue_port_id,
-			  const uint32_t ids[],
-			  uint32_t nb_ids)
-{
-	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
-	uint32_t i;
-
-	/* handle -1 for queue_port_id here, looping over all ports/queues */
-	switch (mode) {
-	case RTE_EVENT_DEV_XSTATS_DEVICE:
-		if (dlb_xstats_reset_dev(dlb, ids, nb_ids))
-			return -EINVAL;
-		break;
-	case RTE_EVENT_DEV_XSTATS_PORT:
-		if (queue_port_id == -1) {
-			for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
-				if (dlb_xstats_reset_port(dlb, i, ids,
-							  nb_ids))
-					return -EINVAL;
-			}
-		} else if (queue_port_id < DLB_MAX_NUM_PORTS) {
-			if (dlb_xstats_reset_port(dlb, queue_port_id, ids,
-						  nb_ids))
-				return -EINVAL;
-		} else {
-			return -EINVAL;
-		}
-		break;
-	case RTE_EVENT_DEV_XSTATS_QUEUE:
-		if (queue_port_id == -1) {
-			for (i = 0; i < DLB_MAX_NUM_QUEUES; i++) {
-				if (dlb_xstats_reset_queue(dlb, i, ids,
-							   nb_ids))
-					return -EINVAL;
-			}
-		} else if (queue_port_id < DLB_MAX_NUM_QUEUES) {
-			if (dlb_xstats_reset_queue(dlb, queue_port_id, ids,
-						   nb_ids))
-				return -EINVAL;
-		} else {
-			return -EINVAL;
-		}
-		break;
-	};
-
-	return 0;
-}
-
-void
-dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f)
-{
-	struct dlb_eventdev *dlb;
-	struct dlb_hw_dev *handle;
-	int i;
-
-	dlb = dlb_pmd_priv(dev);
-
-	if (dlb == NULL) {
-		fprintf(f, "DLB Event device cannot be dumped!\n");
-		return;
-	}
-
-	if (!dlb->configured)
-		fprintf(f, "DLB Event device is not configured\n");
-
-	handle = &dlb->qm_instance;
-
-	fprintf(f, "================\n");
-	fprintf(f, "DLB Device Dump\n");
-	fprintf(f, "================\n");
-
-	fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
-		dlb->umwait_allowed ? "yes" : "no");
-
-	/* Generic top level device information */
-
-	fprintf(f, "device is configured and run state = ");
-	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
-		fprintf(f, "STOPPED\n");
-	else if (dlb->run_state == DLB_RUN_STATE_STOPPING)
-		fprintf(f, "STOPPING\n");
-	else if (dlb->run_state == DLB_RUN_STATE_STARTING)
-		fprintf(f, "STARTING\n");
-	else if (dlb->run_state == DLB_RUN_STATE_STARTED)
-		fprintf(f, "STARTED\n");
-	else
-		fprintf(f, "UNEXPECTED\n");
-
-	fprintf(f,
-		"dev ID=%d, dom ID=%u, sock=%u, evdev=%p\n",
-		handle->device_id, handle->domain_id,
-		handle->info.socket_id, dlb->event_dev);
-
-	fprintf(f, "num dir ports=%u, num dir queues=%u\n",
-		dlb->num_dir_ports, dlb->num_dir_queues);
-
-	fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
-		dlb->num_ldb_ports, dlb->num_ldb_queues);
-
-	fprintf(f, "dir_credit_pool_id=%u, num_credits=%u\n",
-		handle->cfg.dir_credit_pool_id, handle->cfg.num_dir_credits);
-
-	fprintf(f, "ldb_credit_pool_id=%u, num_credits=%u\n",
-		handle->cfg.ldb_credit_pool_id, handle->cfg.num_ldb_credits);
-
-	fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
-		handle->cfg.resources.num_atomic_inflights,
-		handle->cfg.resources.num_hist_list_entries);
-
-	fprintf(f, "results from most recent hw resource query:\n");
-
-	fprintf(f, "\tnum_sched_domains = %u\n",
-		dlb->hw_rsrc_query_results.num_sched_domains);
-
-	fprintf(f, "\tnum_ldb_queues = %u\n",
-		dlb->hw_rsrc_query_results.num_ldb_queues);
-
-	fprintf(f, "\tnum_ldb_ports = %u\n",
-		dlb->hw_rsrc_query_results.num_ldb_ports);
-
-	fprintf(f, "\tnum_dir_ports = %u\n",
-		dlb->hw_rsrc_query_results.num_dir_ports);
-
-	fprintf(f, "\tnum_atomic_inflights = %u\n",
-		dlb->hw_rsrc_query_results.num_atomic_inflights);
-
-	fprintf(f, "\tmax_contiguous_atomic_inflights = %u\n",
-		dlb->hw_rsrc_query_results.max_contiguous_atomic_inflights);
-
-	fprintf(f, "\tnum_hist_list_entries = %u\n",
-		dlb->hw_rsrc_query_results.num_hist_list_entries);
-
-	fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
-		dlb->hw_rsrc_query_results.max_contiguous_hist_list_entries);
-
-	fprintf(f, "\tnum_ldb_credits = %u\n",
-		dlb->hw_rsrc_query_results.num_ldb_credits);
-
-	fprintf(f, "\tmax_contiguous_ldb_credits = %u\n",
-		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits);
-
-	fprintf(f, "\tnum_dir_credits = %u\n",
-		dlb->hw_rsrc_query_results.num_dir_credits);
-
-	fprintf(f, "\tmax_contiguous_dir_credits = %u\n",
-		dlb->hw_rsrc_query_results.max_contiguous_dir_credits);
-
-	fprintf(f, "\tnum_ldb_credit_pools = %u\n",
-		dlb->hw_rsrc_query_results.num_ldb_credit_pools);
-
-	fprintf(f, "\tnum_dir_credit_pools = %u\n",
-		dlb->hw_rsrc_query_results.num_dir_credit_pools);
-
-	/* Port level information */
-
-	for (i = 0; i < dlb->num_ports; i++) {
-		struct dlb_eventdev_port *p = &dlb->ev_ports[i];
-		int j;
-
-		if (!p->enq_configured)
-			fprintf(f, "Port_%d is not configured\n", i);
-
-		fprintf(f, "Port_%d\n", i);
-		fprintf(f, "=======\n");
-
-		fprintf(f, "\tevport_%u is configured, setup done=%d\n",
-			p->id, p->setup_done);
-
-		fprintf(f, "\tconfig state=%d, port state=%d\n",
-			p->qm_port.config_state, p->qm_port.state);
-
-		fprintf(f, "\tport is %s\n",
-			p->qm_port.is_directed ? "directed" : "load balanced");
-
-		fprintf(f, "\toutstanding releases=%u\n",
-			p->outstanding_releases);
-
-		fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
-			p->inflight_max, p->inflight_credits);
-
-		fprintf(f, "\tcredit update quanta=%u, implicit release =%u\n",
-			p->credit_update_quanta, p->implicit_release);
-
-		fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);
-
-		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
-			if (p->link[j].valid)
-				fprintf(f, "id=%u prio=%u ",
-					p->link[j].queue_id,
-					p->link[j].priority);
-		}
-		fprintf(f, "\n");
-
-		fprintf(f, "\thardware port id=%u\n", p->qm_port.id);
-
-		fprintf(f, "\tcached_ldb_credits=%u\n",
-			p->qm_port.cached_ldb_credits);
-
-		fprintf(f, "\tldb_pushcount_at_credit_expiry = %u\n",
-			p->qm_port.ldb_pushcount_at_credit_expiry);
-
-		fprintf(f, "\tldb_credits = %u\n",
-			p->qm_port.ldb_credits);
-
-		fprintf(f, "\tcached_dir_credits = %u\n",
-			p->qm_port.cached_dir_credits);
-
-		fprintf(f, "\tdir_pushcount_at_credit_expiry=%u\n",
-			p->qm_port.dir_pushcount_at_credit_expiry);
-
-		fprintf(f, "\tdir_credits = %u\n",
-			p->qm_port.dir_credits);
-
-		fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
-			p->qm_port.gen_bit,
-			p->qm_port.cq_idx,
-			p->qm_port.cq_depth);
-
-		fprintf(f, "\tuse reserved token scheme=%d, cq_rsvd_token_deficit=%u\n",
-			p->qm_port.use_rsvd_token_scheme,
-			p->qm_port.cq_rsvd_token_deficit);
-
-		fprintf(f, "\tinterrupt armed=%d\n",
-			p->qm_port.int_armed);
-
-		fprintf(f, "\tPort statistics\n");
-
-		fprintf(f, "\t\trx_ok %" PRIu64 "\n",
-			p->stats.traffic.rx_ok);
-
-		fprintf(f, "\t\trx_drop %" PRIu64 "\n",
-			p->stats.traffic.rx_drop);
-
-		fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
-			p->stats.traffic.rx_interrupt_wait);
-
-		fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
-			p->stats.traffic.rx_umonitor_umwait);
-
-		fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
-			p->stats.traffic.tx_ok);
-
-		fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
-			p->stats.traffic.total_polls);
-
-		fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
-			p->stats.traffic.zero_polls);
-
-		fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
-			p->stats.traffic.tx_nospc_ldb_hw_credits);
-
-		fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
-			p->stats.traffic.tx_nospc_dir_hw_credits);
-
-		fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
-			p->stats.traffic.tx_nospc_inflight_max);
-
-		fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
-			p->stats.traffic.tx_nospc_new_event_limit);
-
-		fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
-			p->stats.traffic.tx_nospc_inflight_credits);
-
-		fprintf(f, "\t\ttx_new %" PRIu64 "\n",
-			p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);
-
-		fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
-			p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);
-
-		fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
-			p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);
-
-		fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
-			p->stats.tx_implicit_rel);
-
-		fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
-			p->stats.tx_sched_cnt[DLB_SCHED_ORDERED]);
-
-		fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
-			p->stats.tx_sched_cnt[DLB_SCHED_UNORDERED]);
-
-		fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
-			p->stats.tx_sched_cnt[DLB_SCHED_ATOMIC]);
-
-		fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
-			p->stats.tx_sched_cnt[DLB_SCHED_DIRECTED]);
-
-		fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
-			p->stats.tx_invalid);
-
-		fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
-			p->stats.rx_sched_cnt[DLB_SCHED_ORDERED]);
-
-		fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
-			p->stats.rx_sched_cnt[DLB_SCHED_UNORDERED]);
-
-		fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
-			p->stats.rx_sched_cnt[DLB_SCHED_ATOMIC]);
-
-		fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
-			p->stats.rx_sched_cnt[DLB_SCHED_DIRECTED]);
-
-		fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
-			p->stats.rx_sched_invalid);
-	}
-
-	/* Queue level information */
-
-	for (i = 0; i < dlb->num_queues; i++) {
-		struct dlb_eventdev_queue *q = &dlb->ev_queues[i];
-		int j, k;
-
-		if (!q->setup_done)
-			fprintf(f, "Queue_%d is not configured\n", i);
-
-		fprintf(f, "Queue_%d\n", i);
-		fprintf(f, "========\n");
-
-		fprintf(f, "\tevqueue_%u is set up\n", q->id);
-
-		fprintf(f, "\tqueue is %s\n",
-			q->qm_queue.is_directed ? "directed" : "load balanced");
-
-		fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);
-
-		for (j = 0; j < dlb->num_ports; j++) {
-			struct dlb_eventdev_port *p = &dlb->ev_ports[j];
-
-			for (k = 0; k < DLB_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
-				if (p->link[k].valid &&
-				    p->link[k].queue_id == q->id)
-					fprintf(f, "id=%u prio=%u ",
-						p->id, p->link[k].priority);
-			}
-		}
-		fprintf(f, "\n");
-
-		 fprintf(f, "\tcurrent depth: %u events\n",
-			 dlb_get_queue_depth(dlb, q));
-
-		fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
-			q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
-	}
-}
diff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build
deleted file mode 100644
index bc158d2e0..000000000
--- a/drivers/event/dlb/meson.build
+++ /dev/null
@@ -1,22 +0,0 @@ 
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2019-2020 Intel Corporation
-
-if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
-        build = false
-        reason = 'only supported on x86_64 Linux'
-        subdir_done()
-endif
-
-sources = files('dlb.c',
-		'dlb_iface.c',
-		'dlb_xstats.c',
-		'pf/dlb_main.c',
-		'pf/dlb_pf.c',
-		'pf/base/dlb_resource.c',
-		'rte_pmd_dlb.c',
-		'dlb_selftest.c'
-)
-
-headers = files('rte_pmd_dlb.h')
-
-deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
diff --git a/drivers/event/dlb/pf/base/dlb_hw_types.h b/drivers/event/dlb/pf/base/dlb_hw_types.h
deleted file mode 100644
index 4c40e2125..000000000
--- a/drivers/event/dlb/pf/base/dlb_hw_types.h
+++ /dev/null
@@ -1,334 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_HW_TYPES_H
-#define __DLB_HW_TYPES_H
-
-#include "../../dlb_user.h"
-#include "dlb_osdep_types.h"
-#include "dlb_osdep_list.h"
-
-#define DLB_MAX_NUM_DOMAINS 32
-#define DLB_MAX_NUM_LDB_QUEUES 128
-#define DLB_MAX_NUM_LDB_PORTS 64
-#define DLB_MAX_NUM_DIR_PORTS 128
-#define DLB_MAX_NUM_LDB_CREDITS 16384
-#define DLB_MAX_NUM_DIR_CREDITS 4096
-#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
-#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
-#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
-#define DLB_MAX_NUM_AQOS_ENTRIES 2048
-#define DLB_MAX_NUM_TOTAL_OUTSTANDING_COMPLETIONS 4096
-#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
-#define DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS 4
-#define DLB_MAX_NUM_SEQUENCE_NUMBER_MODES 6
-#define DLB_QID_PRIORITIES 8
-#define DLB_NUM_ARB_WEIGHTS 8
-#define DLB_MAX_WEIGHT 255
-#define DLB_MAX_PORT_CREDIT_QUANTUM 1023
-#define DLB_MAX_CQ_COMP_CHECK_LOOPS 409600
-#define DLB_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
-#define DLB_HZ 800000000
-
-/* Used for DLB A-stepping workaround for hardware write buffer lock up issue */
-#define DLB_A_STEP_MAX_PORTS 128
-
-#define DLB_PF_DEV_ID 0x270B
-
-/* Interrupt related macros */
-#define DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS 8
-#define DLB_PF_NUM_CQ_INTERRUPT_VECTORS	 64
-#define DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS \
-	(DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + \
-	 DLB_PF_NUM_CQ_INTERRUPT_VECTORS)
-#define DLB_PF_NUM_COMPRESSED_MODE_VECTORS \
-	(DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + 1)
-#define DLB_PF_NUM_PACKED_MODE_VECTORS	 DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS
-#define DLB_PF_COMPRESSED_MODE_CQ_VECTOR_ID DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS
-
-#define DLB_PF_NUM_ALARM_INTERRUPT_VECTORS 4
-#define DLB_INT_ALARM 0
-#define DLB_INT_INGRESS_ERROR 3
-
-#define DLB_ALARM_HW_SOURCE_SYS 0
-#define DLB_ALARM_HW_SOURCE_DLB 1
-
-#define DLB_ALARM_HW_UNIT_CHP 1
-#define DLB_ALARM_HW_UNIT_LSP 3
-
-#define DLB_ALARM_HW_CHP_AID_OUT_OF_CREDITS 6
-#define DLB_ALARM_HW_CHP_AID_ILLEGAL_ENQ 7
-#define DLB_ALARM_HW_LSP_AID_EXCESS_TOKEN_POPS 15
-#define DLB_ALARM_SYS_AID_ILLEGAL_HCW 0
-#define DLB_ALARM_SYS_AID_ILLEGAL_QID 3
-#define DLB_ALARM_SYS_AID_DISABLED_QID 4
-#define DLB_ALARM_SYS_AID_ILLEGAL_CQID 6
-
-/* Hardware-defined base addresses */
-#define DLB_LDB_PP_BASE 0x2100000
-#define DLB_LDB_PP_STRIDE 0x1000
-#define DLB_LDB_PP_BOUND \
-	(DLB_LDB_PP_BASE + DLB_LDB_PP_STRIDE * DLB_MAX_NUM_LDB_PORTS)
-#define DLB_DIR_PP_BASE 0x2000000
-#define DLB_DIR_PP_STRIDE 0x1000
-#define DLB_DIR_PP_BOUND \
-	(DLB_DIR_PP_BASE + DLB_DIR_PP_STRIDE * DLB_MAX_NUM_DIR_PORTS)
-
-struct dlb_freelist {
-	u32 base;
-	u32 bound;
-	u32 offset;
-};
-
-static inline u32 dlb_freelist_count(struct dlb_freelist *list)
-{
-	return (list->bound - list->base) - list->offset;
-}
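The freelist tracks a half-open range [base, bound) of resource IDs, with offset counting how many have been handed out. An illustrative use of dlb_freelist_count() with assumed values:

static void dlb_example_freelist(void)
{
	struct dlb_freelist list = { .base = 100, .bound = 164, .offset = 16 };

	/* (164 - 100) - 16 == 48 entries still available */
	(void)dlb_freelist_count(&list);
}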
-
-struct dlb_hcw {
-	u64 data;
-	/* Word 3 */
-	u16 opaque;
-	u8 qid;
-	u8 sched_type:2;
-	u8 priority:3;
-	u8 msg_type:3;
-	/* Word 4 */
-	u16 lock_id;
-	u8 meas_lat:1;
-	u8 rsvd1:2;
-	u8 no_dec:1;
-	u8 cmp_id:4;
-	u8 cq_token:1;
-	u8 qe_comp:1;
-	u8 qe_frag:1;
-	u8 qe_valid:1;
-	u8 int_arm:1;
-	u8 error:1;
-	u8 rsvd:2;
-};
-
-struct dlb_ldb_queue {
-	struct dlb_list_entry domain_list;
-	struct dlb_list_entry func_list;
-	u32 id;
-	u32 domain_id;
-	u32 num_qid_inflights;
-	struct dlb_freelist aqed_freelist;
-	u8 sn_cfg_valid;
-	u32 sn_group;
-	u32 sn_slot;
-	u32 num_mappings;
-	u8 num_pending_additions;
-	u8 owned;
-	u8 configured;
-};
-
-/* Directed ports and queues are paired by nature, so the driver tracks them
- * with a single data structure.
- */
-struct dlb_dir_pq_pair {
-	struct dlb_list_entry domain_list;
-	struct dlb_list_entry func_list;
-	u32 id;
-	u32 domain_id;
-	u8 ldb_pool_used;
-	u8 dir_pool_used;
-	u8 queue_configured;
-	u8 port_configured;
-	u8 owned;
-	u8 enabled;
-	u32 ref_cnt;
-};
-
-enum dlb_qid_map_state {
-	/* The slot doesn't contain a valid queue mapping */
-	DLB_QUEUE_UNMAPPED,
-	/* The slot contains a valid queue mapping */
-	DLB_QUEUE_MAPPED,
-	/* The driver is mapping a queue into this slot */
-	DLB_QUEUE_MAP_IN_PROGRESS,
-	/* The driver is unmapping a queue from this slot */
-	DLB_QUEUE_UNMAP_IN_PROGRESS,
-	/* The driver is unmapping a queue from this slot, and once complete
-	 * will replace it with another mapping.
-	 */
-	DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP,
-};
-
-struct dlb_ldb_port_qid_map {
-	u16 qid;
-	u8 priority;
-	u16 pending_qid;
-	u8 pending_priority;
-	enum dlb_qid_map_state state;
-};
-
-struct dlb_ldb_port {
-	struct dlb_list_entry domain_list;
-	struct dlb_list_entry func_list;
-	u32 id;
-	u32 domain_id;
-	u8 ldb_pool_used;
-	u8 dir_pool_used;
-	u8 init_tkn_cnt;
-	u32 hist_list_entry_base;
-	u32 hist_list_entry_limit;
-	/* The qid_map represents the hardware QID mapping state. */
-	struct dlb_ldb_port_qid_map qid_map[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
-	u32 ref_cnt;
-	u8 num_pending_removals;
-	u8 num_mappings;
-	u8 owned;
-	u8 enabled;
-	u8 configured;
-};
-
-struct dlb_credit_pool {
-	struct dlb_list_entry domain_list;
-	struct dlb_list_entry func_list;
-	u32 id;
-	u32 domain_id;
-	u32 total_credits;
-	u32 avail_credits;
-	u8 owned;
-	u8 configured;
-};
-
-struct dlb_sn_group {
-	u32 mode;
-	u32 sequence_numbers_per_queue;
-	u32 slot_use_bitmap;
-	u32 id;
-};
-
-static inline bool dlb_sn_group_full(struct dlb_sn_group *group)
-{
-	u32 mask[6] = {
-		0xffffffff,  /* 32 SNs per queue */
-		0x0000ffff,  /* 64 SNs per queue */
-		0x000000ff,  /* 128 SNs per queue */
-		0x0000000f,  /* 256 SNs per queue */
-		0x00000003,  /* 512 SNs per queue */
-		0x00000001}; /* 1024 SNs per queue */
-
-	return group->slot_use_bitmap == mask[group->mode];
-}
-
-static inline int dlb_sn_group_alloc_slot(struct dlb_sn_group *group)
-{
-	int bound[6] = {32, 16, 8, 4, 2, 1};
-	int i;
-
-	for (i = 0; i < bound[group->mode]; i++) {
-		if (!(group->slot_use_bitmap & (1 << i))) {
-			group->slot_use_bitmap |= 1 << i;
-			return i;
-		}
-	}
-
-	return -1;
-}
-
-static inline void dlb_sn_group_free_slot(struct dlb_sn_group *group, int slot)
-{
-	group->slot_use_bitmap &= ~(1 << slot);
-}
-
-static inline int dlb_sn_group_used_slots(struct dlb_sn_group *group)
-{
-	int i, cnt = 0;
-
-	for (i = 0; i < 32; i++)
-		cnt += !!(group->slot_use_bitmap & (1 << i));
-
-	return cnt;
-}
-
-struct dlb_domain {
-	struct dlb_function_resources *parent_func;
-	struct dlb_list_entry func_list;
-	struct dlb_list_head used_ldb_queues;
-	struct dlb_list_head used_ldb_ports;
-	struct dlb_list_head used_dir_pq_pairs;
-	struct dlb_list_head used_ldb_credit_pools;
-	struct dlb_list_head used_dir_credit_pools;
-	struct dlb_list_head avail_ldb_queues;
-	struct dlb_list_head avail_ldb_ports;
-	struct dlb_list_head avail_dir_pq_pairs;
-	struct dlb_list_head avail_ldb_credit_pools;
-	struct dlb_list_head avail_dir_credit_pools;
-	u32 total_hist_list_entries;
-	u32 avail_hist_list_entries;
-	u32 hist_list_entry_base;
-	u32 hist_list_entry_offset;
-	struct dlb_freelist qed_freelist;
-	struct dlb_freelist dqed_freelist;
-	struct dlb_freelist aqed_freelist;
-	u32 id;
-	int num_pending_removals;
-	int num_pending_additions;
-	u8 configured;
-	u8 started;
-};
-
-struct dlb_bitmap;
-
-struct dlb_function_resources {
-	u32 num_avail_domains;
-	struct dlb_list_head avail_domains;
-	struct dlb_list_head used_domains;
-	u32 num_avail_ldb_queues;
-	struct dlb_list_head avail_ldb_queues;
-	u32 num_avail_ldb_ports;
-	struct dlb_list_head avail_ldb_ports;
-	u32 num_avail_dir_pq_pairs;
-	struct dlb_list_head avail_dir_pq_pairs;
-	struct dlb_bitmap *avail_hist_list_entries;
-	struct dlb_bitmap *avail_qed_freelist_entries;
-	struct dlb_bitmap *avail_dqed_freelist_entries;
-	struct dlb_bitmap *avail_aqed_freelist_entries;
-	u32 num_avail_ldb_credit_pools;
-	struct dlb_list_head avail_ldb_credit_pools;
-	u32 num_avail_dir_credit_pools;
-	struct dlb_list_head avail_dir_credit_pools;
-	u32 num_enabled_ldb_ports;
-};
-
-/* After initialization, each resource in dlb_hw_resources is located in one of
- * the following lists:
- * -- The PF's available resources list. These are unconfigured resources owned
- *	by the PF and not allocated to a DLB scheduling domain.
- * -- A domain's available resources list. These are domain-owned unconfigured
- *	resources.
- * -- A domain's used resources list. These are domain-owned configured
- *	resources.
- *
- * A resource moves to a new list when a domain is created or destroyed, or
- * when the resource is configured.
- */
-struct dlb_hw_resources {
-	struct dlb_ldb_queue ldb_queues[DLB_MAX_NUM_LDB_QUEUES];
-	struct dlb_ldb_port ldb_ports[DLB_MAX_NUM_LDB_PORTS];
-	struct dlb_dir_pq_pair dir_pq_pairs[DLB_MAX_NUM_DIR_PORTS];
-	struct dlb_credit_pool ldb_credit_pools[DLB_MAX_NUM_LDB_CREDIT_POOLS];
-	struct dlb_credit_pool dir_credit_pools[DLB_MAX_NUM_DIR_CREDIT_POOLS];
-	struct dlb_sn_group sn_groups[DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
-};
-
-struct dlb_hw {
-	/* BAR 0 address */
-	void  *csr_kva;
-	unsigned long csr_phys_addr;
-	/* BAR 2 address */
-	void  *func_kva;
-	unsigned long func_phys_addr;
-
-	/* Resource tracking */
-	struct dlb_hw_resources rsrcs;
-	struct dlb_function_resources pf;
-	struct dlb_domain domains[DLB_MAX_NUM_DOMAINS];
-};
-
-#endif /* __DLB_HW_TYPES_H */
diff --git a/drivers/event/dlb/pf/base/dlb_osdep.h b/drivers/event/dlb/pf/base/dlb_osdep.h
deleted file mode 100644
index 0c119b759..000000000
--- a/drivers/event/dlb/pf/base/dlb_osdep.h
+++ /dev/null
@@ -1,310 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_OSDEP_H__
-#define __DLB_OSDEP_H__
-
-#include <string.h>
-#include <time.h>
-#include <unistd.h>
-#include <cpuid.h>
-#include <pthread.h>
-#include <rte_string_fns.h>
-#include <rte_cycles.h>
-#include <rte_io.h>
-#include <rte_log.h>
-#include <rte_spinlock.h>
-#include "../dlb_main.h"
-#include "dlb_resource.h"
-#include "../../dlb_log.h"
-#include "../../dlb_user.h"
-
-
-#define DLB_PCI_REG_READ(reg)        rte_read32((void *)reg)
-#define DLB_PCI_REG_WRITE(reg, val)   rte_write32(val, (void *)reg)
-
-#define DLB_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
-#define DLB_CSR_RD(hw, reg) \
-	DLB_PCI_REG_READ(DLB_CSR_REG_ADDR((hw), (reg)))
-#define DLB_CSR_WR(hw, reg, val) \
-	DLB_PCI_REG_WRITE(DLB_CSR_REG_ADDR((hw), (reg)), (val))
-
-#define DLB_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
-#define DLB_FUNC_RD(hw, reg) \
-	DLB_PCI_REG_READ(DLB_FUNC_REG_ADDR((hw), (reg)))
-#define DLB_FUNC_WR(hw, reg, val) \
-	DLB_PCI_REG_WRITE(DLB_FUNC_REG_ADDR((hw), (reg)), (val))
-
-extern unsigned int dlb_unregister_timeout_s;
-/**
- * os_queue_unregister_timeout_s() - timeout (in seconds) to wait for queue
- *                                   unregister acknowledgments.
- */
-static inline unsigned int os_queue_unregister_timeout_s(void)
-{
-	return dlb_unregister_timeout_s;
-}
-
-static inline size_t os_strlcpy(char *dst, const char *src, size_t sz)
-{
-	return rte_strlcpy(dst, src, sz);
-}
-
-/**
- * os_udelay() - busy-wait for a number of microseconds
- * @usecs: delay duration.
- */
-static inline void os_udelay(int usecs)
-{
-	rte_delay_us(usecs);
-}
-
-/**
- * os_msleep() - sleep for a number of milliseconds
- * @msecs: delay duration.
- */
-
-static inline void os_msleep(int msecs)
-{
-	rte_delay_ms(msecs);
-}
-
-#define DLB_PP_BASE(__is_ldb) ((__is_ldb) ? DLB_LDB_PP_BASE : DLB_DIR_PP_BASE)
-/**
- * os_map_producer_port() - map a producer port into the caller's address space
- * @hw: dlb_hw handle for a particular device.
- * @port_id: port ID
- * @is_ldb: true for load-balanced port, false for a directed port
- *
- * This function maps the requested producer port memory into the caller's
- * address space.
- *
- * Return:
- * Returns the base address at which the PP memory was mapped, else NULL.
- */
-static inline void *os_map_producer_port(struct dlb_hw *hw,
-					 u8 port_id,
-					 bool is_ldb)
-{
-	uint64_t addr;
-	uint64_t pp_dma_base;
-
-
-	pp_dma_base = (uintptr_t)hw->func_kva + DLB_PP_BASE(is_ldb);
-	addr = (pp_dma_base + (PAGE_SIZE * port_id));
-
-	return (void *)(uintptr_t)addr;
-
-}
-/**
- * os_unmap_producer_port() - unmap a producer port
- * @hw: dlb_hw handle for a particular device.
- * @addr: mapped producer port address
- *
- * This function undoes os_map_producer_port() by unmapping the producer port
- * memory from the caller's address space.
- *
- * Return:
- * None.
- */
-
-/* PFPMD - Nothing to do here, since memory was not actually mapped by us */
-static inline void os_unmap_producer_port(struct dlb_hw *hw, void *addr)
-{
-	RTE_SET_USED(hw);
-	RTE_SET_USED(addr);
-}
-
-/**
- * os_fence_hcw() - fence an HCW to ensure it arrives at the device
- * @hw: dlb_hw handle for a particular device.
- * @pp_addr: producer port address
- */
-static inline void os_fence_hcw(struct dlb_hw *hw, u64 *pp_addr)
-{
-	RTE_SET_USED(hw);
-
-	/* To ensure outstanding HCWs reach the device, read the PP address. IA
-	 * memory ordering prevents reads from passing older writes, and the
-	 * mfence also ensures this.
-	 */
-	rte_mb();
-
-	*(volatile u64 *)pp_addr;
-}
-
-/* Map to PMDs logging interface */
-#define DLB_ERR(dev, fmt, args...) \
-	DLB_LOG_ERR(fmt, ## args)
-
-#define DLB_INFO(dev, fmt, args...) \
-	DLB_LOG_INFO(fmt, ## args)
-
-#define DLB_DEBUG(dev, fmt, args...) \
-	DLB_LOG_DEBUG(fmt, ## args)
-
-/**
- * DLB_HW_ERR() - log an error message
- * @dlb: dlb_hw handle for a particular device.
- * @...: variable string args.
- */
-#define DLB_HW_ERR(dlb, ...) do {	\
-	RTE_SET_USED(dlb);		\
-	DLB_ERR(dlb, __VA_ARGS__);	\
-} while (0)
-
-/**
- * DLB_HW_INFO() - log an info message
- * @dlb: dlb_hw handle for a particular device.
- * @...: variable string args.
- */
-#define DLB_HW_INFO(dlb, ...) do {	\
-	RTE_SET_USED(dlb);		\
-	DLB_INFO(dlb, __VA_ARGS__);	\
-} while (0)
-
-/*** scheduling functions ***/
-
-/* The callback runs until it completes all outstanding QID->CQ
- * map and unmap requests. To prevent deadlock, this function gives other
- * threads a chance to grab the resource mutex and configure hardware.
- */
-static void *dlb_complete_queue_map_unmap(void *__args)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)__args;
-	int ret;
-
-	while (1) {
-		rte_spinlock_lock(&dlb_dev->resource_mutex);
-
-		ret = dlb_finish_unmap_qid_procedures(&dlb_dev->hw);
-		ret += dlb_finish_map_qid_procedures(&dlb_dev->hw);
-
-		if (ret != 0) {
-			rte_spinlock_unlock(&dlb_dev->resource_mutex);
-			/* Relinquish the CPU so the application can process
-			 * its CQs, so this function does not deadlock.
-			 */
-			sched_yield();
-		} else
-			break;
-	}
-
-	dlb_dev->worker_launched = false;
-
-	rte_spinlock_unlock(&dlb_dev->resource_mutex);
-
-	return NULL;
-}
-
-
-/**
- * os_schedule_work() - launch a thread to process pending map and unmap work
- * @hw: dlb_hw handle for a particular device.
- *
- * This function launches a thread that will run until all pending
- * map and unmap procedures are complete.
- */
-static inline void os_schedule_work(struct dlb_hw *hw)
-{
-	struct dlb_dev *dlb_dev;
-	pthread_t complete_queue_map_unmap_thread;
-	int ret;
-
-	dlb_dev = container_of(hw, struct dlb_dev, hw);
-
-	ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
-				     "dlb_queue_unmap_waiter",
-				     NULL,
-				     dlb_complete_queue_map_unmap,
-				     dlb_dev);
-	if (ret)
-		DLB_ERR(dlb_dev,
-		"Could not create queue complete map/unmap thread, err=%d\n",
-			  ret);
-	else
-		dlb_dev->worker_launched = true;
-}
-
-/**
- * os_worker_active() - query whether the map/unmap worker thread is active
- * @hw: dlb_hw handle for a particular device.
- *
- * This function returns a boolean indicating whether a thread (launched by
- * os_schedule_work()) is active. This function is used to determine
- * whether or not to launch a worker thread.
- */
-static inline bool os_worker_active(struct dlb_hw *hw)
-{
-	struct dlb_dev *dlb_dev;
-
-	dlb_dev = container_of(hw, struct dlb_dev, hw);
-
-	return dlb_dev->worker_launched;
-}
-
-/**
- * os_notify_user_space() - notify user space
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: ID of domain to notify.
- * @alert_id: alert ID.
- * @aux_alert_data: additional alert data.
- *
- * This function notifies user space of an alert (such as a remote queue
- * unregister or hardware alarm).
- *
- * Return:
- * Returns 0 upon success, <0 otherwise.
- */
-static inline int os_notify_user_space(struct dlb_hw *hw,
-				       u32 domain_id,
-				       u64 alert_id,
-				       u64 aux_alert_data)
-{
-	RTE_SET_USED(hw);
-	RTE_SET_USED(domain_id);
-	RTE_SET_USED(alert_id);
-	RTE_SET_USED(aux_alert_data);
-
-	/* Not called for PF PMD */
-	return -1;
-}
-
-enum dlb_dev_revision {
-	DLB_A0,
-	DLB_A1,
-	DLB_A2,
-	DLB_A3,
-	DLB_B0,
-};
-
-/**
- * os_get_dev_revision() - query the device_revision
- * @hw: dlb_hw handle for a particular device.
- */
-static inline enum dlb_dev_revision os_get_dev_revision(struct dlb_hw *hw)
-{
-	uint32_t a, b, c, d, stepping;
-
-	RTE_SET_USED(hw);
-
-	__cpuid(0x1, a, b, c, d);
-
-	stepping = a & 0xf;
-
-	switch (stepping) {
-	case 0:
-		return DLB_A0;
-	case 1:
-		return DLB_A1;
-	case 2:
-		return DLB_A2;
-	case 3:
-		return DLB_A3;
-	default:
-		/* Treat all revisions >= 4 as B0 */
-		return DLB_B0;
-	}
-}
-
-#endif /*  __DLB_OSDEP_H__ */
diff --git a/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h b/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
deleted file mode 100644
index 4c10c8c5d..000000000
--- a/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
+++ /dev/null
@@ -1,441 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_OSDEP_BITMAP_H__
-#define __DLB_OSDEP_BITMAP_H__
-
-#include <stdint.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <rte_bitmap.h>
-#include <rte_string_fns.h>
-#include <rte_malloc.h>
-#include <rte_errno.h>
-#include "../dlb_main.h"
-
-/*************************/
-/*** Bitmap operations ***/
-/*************************/
-struct dlb_bitmap {
-	struct rte_bitmap *map;
-	unsigned int len;
-	struct dlb_hw *hw;
-};
-
-/**
- * dlb_bitmap_alloc() - alloc a bitmap data structure
- * @bitmap: pointer to dlb_bitmap structure pointer.
- * @len: number of entries in the bitmap.
- *
- * This function allocates a bitmap and initializes it with length @len. All
- * entries are initially zero.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or len is 0.
- * ENOMEM - could not allocate memory for the bitmap data structure.
- */
-static inline int dlb_bitmap_alloc(struct dlb_hw *hw,
-				   struct dlb_bitmap **bitmap,
-				   unsigned int len)
-{
-	struct dlb_bitmap *bm;
-	void *mem;
-	uint32_t alloc_size;
-	uint32_t nbits = (uint32_t) len;
-	RTE_SET_USED(hw);
-
-	if (bitmap == NULL || nbits == 0)
-		return -EINVAL;
-
-	/* Allocate DLB bitmap control struct */
-	bm = rte_malloc("DLB_PF",
-		sizeof(struct dlb_bitmap),
-		RTE_CACHE_LINE_SIZE);
-
-	if (bm == NULL)
-		return -ENOMEM;
-
-	/* Allocate bitmap memory */
-	alloc_size = rte_bitmap_get_memory_footprint(nbits);
-	mem = rte_malloc("DLB_PF_BITMAP", alloc_size, RTE_CACHE_LINE_SIZE);
-	if (mem == NULL) {
-		rte_free(bm);
-		return -ENOMEM;
-	}
-
-	bm->map = rte_bitmap_init(len, mem, alloc_size);
-	if (bm->map == NULL) {
-		rte_free(mem);
-		rte_free(bm);
-		return -ENOMEM;
-	}
-
-	bm->len = len;
-
-	*bitmap = bm;
-
-	return 0;
-}
-
-/**
- * dlb_bitmap_free() - free a previously allocated bitmap data structure
- * @bitmap: pointer to dlb_bitmap structure.
- *
- * This function frees a bitmap that was allocated with dlb_bitmap_alloc().
- */
-static inline void dlb_bitmap_free(struct dlb_bitmap *bitmap)
-{
-	if (bitmap == NULL)
-		return;
-
-	rte_free(bitmap->map);
-	rte_free(bitmap);
-}
-
-/**
- * dlb_bitmap_fill() - fill a bitmap with all 1s
- * @bitmap: pointer to dlb_bitmap structure.
- *
- * This function sets all bitmap values to 1.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or is uninitialized.
- */
-static inline int dlb_bitmap_fill(struct dlb_bitmap *bitmap)
-{
-	unsigned int i;
-
-	if (bitmap  == NULL || bitmap->map == NULL)
-		return -EINVAL;
-
-	for (i = 0; i != bitmap->len; i++)
-		rte_bitmap_set(bitmap->map, i);
-
-	return 0;
-}
-
-/**
- * dlb_bitmap_zero() - fill a bitmap with all 0s
- * @bitmap: pointer to dlb_bitmap structure.
- *
- * This function sets all bitmap values to 0.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or is uninitialized.
- */
-static inline int dlb_bitmap_zero(struct dlb_bitmap *bitmap)
-{
-	if (bitmap  == NULL || bitmap->map == NULL)
-		return -EINVAL;
-
-	rte_bitmap_reset(bitmap->map);
-
-	return 0;
-}
-
-/**
- * dlb_bitmap_set() - set a bitmap entry
- * @bitmap: pointer to dlb_bitmap structure.
- * @bit: bit index.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
- *	    bitmap length.
- */
-static inline int dlb_bitmap_set(struct dlb_bitmap *bitmap,
-				 unsigned int bit)
-{
-	if (bitmap  == NULL || bitmap->map == NULL)
-		return -EINVAL;
-
-	if (bitmap->len <= bit)
-		return -EINVAL;
-
-	rte_bitmap_set(bitmap->map, bit);
-
-	return 0;
-}
-
-/**
- * dlb_bitmap_set_range() - set a range of bitmap entries
- * @bitmap: pointer to dlb_bitmap structure.
- * @bit: starting bit index.
- * @len: length of the range.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
- *	    length.
- */
-static inline int dlb_bitmap_set_range(struct dlb_bitmap *bitmap,
-				       unsigned int bit,
-				       unsigned int len)
-{
-	unsigned int i;
-
-	if (bitmap  == NULL || bitmap->map == NULL)
-		return -EINVAL;
-
-	if (bitmap->len <= bit)
-		return -EINVAL;
-
-	for (i = 0; i != len; i++)
-		rte_bitmap_set(bitmap->map, bit + i);
-
-	return 0;
-}
-
-/**
- * dlb_bitmap_clear() - clear a bitmap entry
- * @bitmap: pointer to dlb_bitmap structure.
- * @bit: bit index.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
- *	    bitmap length.
- */
-static inline int dlb_bitmap_clear(struct dlb_bitmap *bitmap,
-				   unsigned int bit)
-{
-	if (bitmap  == NULL || bitmap->map == NULL)
-		return -EINVAL;
-
-	if (bitmap->len <= bit)
-		return -EINVAL;
-
-	rte_bitmap_clear(bitmap->map, bit);
-
-	return 0;
-}
-
-/**
- * dlb_bitmap_clear_range() - clear a range of bitmap entries
- * @bitmap: pointer to dlb_bitmap structure.
- * @bit: starting bit index.
- * @len: length of the range.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
- *	    length.
- */
-static inline int dlb_bitmap_clear_range(struct dlb_bitmap *bitmap,
-					 unsigned int bit,
-					 unsigned int len)
-{
-	unsigned int i;
-
-	if (bitmap  == NULL || bitmap->map == NULL)
-		return -EINVAL;
-
-	if (bitmap->len <= bit)
-		return -EINVAL;
-
-	for (i = 0; i != len; i++)
-		rte_bitmap_clear(bitmap->map, bit + i);
-
-	return 0;
-}
-
-/**
- * dlb_bitmap_find_set_bit_range() - find a range of set bits
- * @bitmap: pointer to dlb_bitmap structure.
- * @len: length of the range.
- *
- * This function looks for a range of set bits of length @len.
- *
- * Return:
- * Returns the base bit index upon success, < 0 otherwise.
- *
- * Errors:
- * ENOENT - unable to find a length *len* range of set bits.
- * EINVAL - bitmap is NULL or is uninitialized, or len is invalid.
- */
-static inline int dlb_bitmap_find_set_bit_range(struct dlb_bitmap *bitmap,
-						unsigned int len)
-{
-	unsigned int i, j = 0;
-
-	if (bitmap  == NULL || bitmap->map  == NULL || len == 0)
-		return -EINVAL;
-
-	if (bitmap->len < len)
-		return -ENOENT;
-
-	for (i = 0; i != bitmap->len; i++) {
-		if  (rte_bitmap_get(bitmap->map, i)) {
-			if (++j == len)
-				return i - j + 1;
-		} else
-			j = 0;
-	}
-
-	/* No set bit range of length len? */
-	return -ENOENT;
-}
-
-/**
- * dlb_bitmap_find_set_bit() - find the first set bit
- * @bitmap: pointer to dlb_bitmap structure.
- *
- * This function looks for a single set bit.
- *
- * Return:
- * Returns the base bit index upon success, < 0 otherwise.
- *
- * Errors:
- * ENOENT - the bitmap contains no set bits.
- * EINVAL - bitmap is NULL or is uninitialized.
- */
-static inline int dlb_bitmap_find_set_bit(struct dlb_bitmap *bitmap)
-{
-	unsigned int i;
-
-	if (bitmap == NULL)
-		return -EINVAL;
-
-	if (bitmap->map == NULL)
-		return -EINVAL;
-
-	for (i = 0; i != bitmap->len; i++) {
-		if  (rte_bitmap_get(bitmap->map, i))
-			return i;
-	}
-
-	return -ENOENT;
-}
-
-/**
- * dlb_bitmap_count() - returns the number of set bits
- * @bitmap: pointer to dlb_bitmap structure.
- *
- * This function returns the number of bits set in the bitmap.
- *
- * Return:
- * Returns the number of set bits upon success, <0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or is uninitialized.
- */
-static inline int dlb_bitmap_count(struct dlb_bitmap *bitmap)
-{
-	int weight = 0;
-	unsigned int i;
-
-	if (bitmap == NULL)
-		return -EINVAL;
-
-	if (bitmap->map == NULL)
-		return -EINVAL;
-
-	for (i = 0; i != bitmap->len; i++) {
-		if  (rte_bitmap_get(bitmap->map, i))
-			weight++;
-	}
-	return weight;
-}
-
-/**
- * dlb_bitmap_longest_set_range() - returns longest contiguous range of set bits
- * @bitmap: pointer to dlb_bitmap structure.
- *
- * Return:
- * Returns the bitmap's longest contiguous range of set bits upon success,
- * <0 otherwise.
- *
- * Errors:
- * EINVAL - bitmap is NULL or is uninitialized.
- */
-static inline int dlb_bitmap_longest_set_range(struct dlb_bitmap *bitmap)
-{
-	int max_len = 0, len = 0;
-	unsigned int i;
-
-	if (bitmap == NULL)
-		return -EINVAL;
-
-	if (bitmap->map == NULL)
-		return -EINVAL;
-
-	for (i = 0; i != bitmap->len; i++) {
-		if  (rte_bitmap_get(bitmap->map, i)) {
-			len++;
-		} else {
-			if (len > max_len)
-				max_len = len;
-			len = 0;
-		}
-	}
-
-	if (len > max_len)
-		max_len = len;
-
-	return max_len;
-}
-
-/**
- * dlb_bitmap_or() - store the logical 'or' of two bitmaps into a third
- * @dest: pointer to dlb_bitmap structure, which will contain the results of
- *	  the 'or' of src1 and src2.
- * @src1: pointer to dlb_bitmap structure, will be 'or'ed with src2.
- * @src2: pointer to dlb_bitmap structure, will be 'or'ed with src1.
- *
- * This function 'or's two bitmaps together and stores the result in a third
- * bitmap. The source and destination bitmaps can be the same.
- *
- * Return:
- * Returns the number of set bits upon success, <0 otherwise.
- *
- * Errors:
- * EINVAL - One of the bitmaps is NULL or is uninitialized.
- */
-static inline int dlb_bitmap_or(struct dlb_bitmap *dest,
-				struct dlb_bitmap *src1,
-				struct dlb_bitmap *src2)
-{
-	unsigned int i, min;
-	int numset = 0;
-
-	if (dest  == NULL || dest->map == NULL ||
-	    src1 == NULL || src1->map == NULL ||
-	    src2  == NULL || src2->map == NULL)
-		return -EINVAL;
-
-	min = dest->len;
-	min = (min > src1->len) ? src1->len : min;
-	min = (min > src2->len) ? src2->len : min;
-
-	for (i = 0; i != min; i++) {
-		if  (rte_bitmap_get(src1->map, i) ||
-				rte_bitmap_get(src2->map, i)) {
-			rte_bitmap_set(dest->map, i);
-			numset++;
-		} else
-			rte_bitmap_clear(dest->map, i);
-	}
-
-	return numset;
-}
-
-#endif /*  __DLB_OSDEP_BITMAP_H__ */
diff --git a/drivers/event/dlb/pf/base/dlb_osdep_list.h b/drivers/event/dlb/pf/base/dlb_osdep_list.h
deleted file mode 100644
index a53b3626e..000000000
--- a/drivers/event/dlb/pf/base/dlb_osdep_list.h
+++ /dev/null
@@ -1,131 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_OSDEP_LIST_H__
-#define __DLB_OSDEP_LIST_H__
-
-#include <rte_tailq.h>
-
-struct dlb_list_entry {
-	TAILQ_ENTRY(dlb_list_entry) node;
-};
-
-/* Dummy - just a struct definition */
-TAILQ_HEAD(dlb_list_head, dlb_list_entry);
-
-/* =================
- * TAILQ Supplements
- * =================
- */
-
-#ifndef TAILQ_FOREACH_ENTRY
-#define TAILQ_FOREACH_ENTRY(ptr, head, name, iter)		\
-	for ((iter) = TAILQ_FIRST(&head);			\
-	    (iter)						\
-		&& (ptr = container_of(iter, typeof(*(ptr)), name)); \
-	    (iter) = TAILQ_NEXT((iter), node))
-#endif
-
-#ifndef TAILQ_FOREACH_ENTRY_SAFE
-#define TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, iter, tvar)	\
-	for ((iter) = TAILQ_FIRST(&head);			\
-	    (iter) &&						\
-		(ptr = container_of(iter, typeof(*(ptr)), name)) &&\
-		((tvar) = TAILQ_NEXT((iter), node), 1);	\
-	    (iter) = (tvar))
-#endif
-
-/* =========
- * DLB Lists
- * =========
- */
-
-/**
- * dlb_list_init_head() - initialize the head of a list
- * @head: list head
- */
-static inline void dlb_list_init_head(struct dlb_list_head *head)
-{
-	TAILQ_INIT(head);
-}
-
-/**
- * dlb_list_add() - add an entry to a list
- * @head: new entry will be added after this list header
- * @entry: new list entry to be added
- */
-static inline void dlb_list_add(struct dlb_list_head *head,
-				struct dlb_list_entry *entry)
-{
-	TAILQ_INSERT_TAIL(head, entry, node);
-}
-
-/**
- * dlb_list_del() - delete an entry from a list
- * @head: list head
- * @entry: list entry to be deleted
- */
-static inline void dlb_list_del(struct dlb_list_head *head,
-				struct dlb_list_entry *entry)
-{
-	TAILQ_REMOVE(head, entry, node);
-}
-
-/**
- * dlb_list_empty() - check if a list is empty
- * @head: list head
- *
- * Return:
- * Returns 1 if empty, 0 if not.
- */
-static inline bool dlb_list_empty(struct dlb_list_head *head)
-{
-	return TAILQ_EMPTY(head);
-}
-
-/**
- * dlb_list_splice() - splice one list onto the tail of another
- * @src_head: list to be spliced onto head
- * @head: list onto which src_head is appended
- */
-static inline void dlb_list_splice(struct dlb_list_head *src_head,
-				   struct dlb_list_head *head)
-{
-	TAILQ_CONCAT(head, src_head, node);
-}
-
-/**
- * DLB_LIST_HEAD() - retrieve the head of the list
- * @head: list head
- * @type: type of the list variable
- * @name: name of the dlb_list within the struct
- */
-#define DLB_LIST_HEAD(head, type, name)				\
-	(TAILQ_FIRST(&head) ?					\
-		container_of(TAILQ_FIRST(&head), type, name) :	\
-		NULL)
-
-/**
- * DLB_LIST_FOR_EACH() - iterate over a list
- * @head: list head
- * @ptr: pointer to struct containing a struct dlb_list_entry
- * @name: name of the dlb_list_entry field within the containing struct
- * @tmp_iter: iterator variable
- */
-#define DLB_LIST_FOR_EACH(head, ptr, name, tmp_iter) \
-	TAILQ_FOREACH_ENTRY(ptr, head, name, tmp_iter)
-
-/**
- * DLB_LIST_FOR_EACH_SAFE() - iterate over a list. This loop works even if
- * an element is removed from the list while processing it.
- * @ptr: pointer to struct containing a struct dlb_list_entry
- * @ptr_tmp: pointer to struct containing a struct dlb_list_entry (temporary)
- * @head: list head
- * @name: name of the dlb_list_entry field within the containing struct
- * @tmp_iter: iterator variable
- * @saf_iter: iterator variable (temporary)
- */
-#define DLB_LIST_FOR_EACH_SAFE(head, ptr, ptr_tmp, name, tmp_iter, saf_iter) \
-	TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, tmp_iter, saf_iter)
-
-#endif /*  __DLB_OSDEP_LIST_H__ */
diff --git a/drivers/event/dlb/pf/base/dlb_osdep_types.h b/drivers/event/dlb/pf/base/dlb_osdep_types.h
deleted file mode 100644
index 2e9d7d8d0..000000000
--- a/drivers/event/dlb/pf/base/dlb_osdep_types.h
+++ /dev/null
@@ -1,31 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_OSDEP_TYPES_H
-#define __DLB_OSDEP_TYPES_H
-
-#include <linux/types.h>
-
-#include <inttypes.h>
-#include <ctype.h>
-#include <stdint.h>
-#include <stdbool.h>
-#include <string.h>
-#include <unistd.h>
-#include <errno.h>
-
-/* Types for user mode PF PMD */
-typedef uint8_t         u8;
-typedef int8_t          s8;
-typedef uint16_t        u16;
-typedef int16_t         s16;
-typedef uint32_t        u32;
-typedef int32_t         s32;
-typedef uint64_t        u64;
-
-#define __iomem
-
-/* END types for user mode PF PMD */
-
-#endif /* __DLB_OSDEP_TYPES_H */
diff --git a/drivers/event/dlb/pf/base/dlb_regs.h b/drivers/event/dlb/pf/base/dlb_regs.h
deleted file mode 100644
index a1c63f336..000000000
--- a/drivers/event/dlb/pf/base/dlb_regs.h
+++ /dev/null
@@ -1,2368 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_REGS_H
-#define __DLB_REGS_H
-
-#include "dlb_osdep_types.h"
-
-#define DLB_MSIX_MEM_VECTOR_CTRL(x) \
-	(0x100000c + (x) * 0x10)
-#define DLB_MSIX_MEM_VECTOR_CTRL_RST 0x1
-union dlb_msix_mem_vector_ctrl {
-	struct {
-		u32 vec_mask : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_TOTAL_VAS 0x124
-#define DLB_SYS_TOTAL_VAS_RST 0x20
-union dlb_sys_total_vas {
-	struct {
-		u32 total_vas : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_ALARM_PF_SYND2 0x508
-#define DLB_SYS_ALARM_PF_SYND2_RST 0x0
-union dlb_sys_alarm_pf_synd2 {
-	struct {
-		u32 lock_id : 16;
-		u32 meas : 1;
-		u32 debug : 7;
-		u32 cq_pop : 1;
-		u32 qe_uhl : 1;
-		u32 qe_orsp : 1;
-		u32 qe_valid : 1;
-		u32 cq_int_rearm : 1;
-		u32 dsi_error : 1;
-		u32 rsvd0 : 2;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_ALARM_PF_SYND1 0x504
-#define DLB_SYS_ALARM_PF_SYND1_RST 0x0
-union dlb_sys_alarm_pf_synd1 {
-	struct {
-		u32 dsi : 16;
-		u32 qid : 8;
-		u32 qtype : 2;
-		u32 qpri : 3;
-		u32 msg_type : 3;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_ALARM_PF_SYND0 0x500
-#define DLB_SYS_ALARM_PF_SYND0_RST 0x0
-union dlb_sys_alarm_pf_synd0 {
-	struct {
-		u32 syndrome : 8;
-		u32 rtype : 2;
-		u32 rsvd0 : 2;
-		u32 from_dmv : 1;
-		u32 is_ldb : 1;
-		u32 cls : 2;
-		u32 aid : 6;
-		u32 unit : 4;
-		u32 source : 4;
-		u32 more : 1;
-		u32 valid : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_VASQID_V(x) \
-	(0xf60 + (x) * 0x1000)
-#define DLB_SYS_LDB_VASQID_V_RST 0x0
-union dlb_sys_ldb_vasqid_v {
-	struct {
-		u32 vasqid_v : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_VASQID_V(x) \
-	(0xf68 + (x) * 0x1000)
-#define DLB_SYS_DIR_VASQID_V_RST 0x0
-union dlb_sys_dir_vasqid_v {
-	struct {
-		u32 vasqid_v : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_WBUF_DIR_FLAGS(x) \
-	(0xf70 + (x) * 0x1000)
-#define DLB_SYS_WBUF_DIR_FLAGS_RST 0x0
-union dlb_sys_wbuf_dir_flags {
-	struct {
-		u32 wb_v : 4;
-		u32 cl : 1;
-		u32 busy : 1;
-		u32 opt : 1;
-		u32 rsvd0 : 25;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_WBUF_LDB_FLAGS(x) \
-	(0xf78 + (x) * 0x1000)
-#define DLB_SYS_WBUF_LDB_FLAGS_RST 0x0
-union dlb_sys_wbuf_ldb_flags {
-	struct {
-		u32 wb_v : 4;
-		u32 cl : 1;
-		u32 busy : 1;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_QID_V(x) \
-	(0x8000034 + (x) * 0x1000)
-#define DLB_SYS_LDB_QID_V_RST 0x0
-union dlb_sys_ldb_qid_v {
-	struct {
-		u32 qid_v : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_QID_CFG_V(x) \
-	(0x8000030 + (x) * 0x1000)
-#define DLB_SYS_LDB_QID_CFG_V_RST 0x0
-union dlb_sys_ldb_qid_cfg_v {
-	struct {
-		u32 sn_cfg_v : 1;
-		u32 fid_cfg_v : 1;
-		u32 rsvd0 : 30;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_QID_V(x) \
-	(0x8000040 + (x) * 0x1000)
-#define DLB_SYS_DIR_QID_V_RST 0x0
-union dlb_sys_dir_qid_v {
-	struct {
-		u32 qid_v : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_POOL_ENBLD(x) \
-	(0x8000070 + (x) * 0x1000)
-#define DLB_SYS_LDB_POOL_ENBLD_RST 0x0
-union dlb_sys_ldb_pool_enbld {
-	struct {
-		u32 pool_enabled : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_POOL_ENBLD(x) \
-	(0x8000080 + (x) * 0x1000)
-#define DLB_SYS_DIR_POOL_ENBLD_RST 0x0
-union dlb_sys_dir_pool_enbld {
-	struct {
-		u32 pool_enabled : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_PP2VPP(x) \
-	(0x8000090 + (x) * 0x1000)
-#define DLB_SYS_LDB_PP2VPP_RST 0x0
-union dlb_sys_ldb_pp2vpp {
-	struct {
-		u32 vpp : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_PP2VPP(x) \
-	(0x8000094 + (x) * 0x1000)
-#define DLB_SYS_DIR_PP2VPP_RST 0x0
-union dlb_sys_dir_pp2vpp {
-	struct {
-		u32 vpp : 7;
-		u32 rsvd0 : 25;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_PP_V(x) \
-	(0x8000128 + (x) * 0x1000)
-#define DLB_SYS_LDB_PP_V_RST 0x0
-union dlb_sys_ldb_pp_v {
-	struct {
-		u32 pp_v : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_CQ_ISR(x) \
-	(0x8000124 + (x) * 0x1000)
-#define DLB_SYS_LDB_CQ_ISR_RST 0x0
-/* CQ Interrupt Modes */
-#define DLB_CQ_ISR_MODE_DIS  0
-#define DLB_CQ_ISR_MODE_MSI  1
-#define DLB_CQ_ISR_MODE_MSIX 2
-union dlb_sys_ldb_cq_isr {
-	struct {
-		u32 vector : 6;
-		u32 vf : 4;
-		u32 en_code : 2;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_CQ2VF_PF(x) \
-	(0x8000120 + (x) * 0x1000)
-#define DLB_SYS_LDB_CQ2VF_PF_RST 0x0
-union dlb_sys_ldb_cq2vf_pf {
-	struct {
-		u32 vf : 4;
-		u32 is_pf : 1;
-		u32 rsvd0 : 27;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_PP2VAS(x) \
-	(0x800011c + (x) * 0x1000)
-#define DLB_SYS_LDB_PP2VAS_RST 0x0
-union dlb_sys_ldb_pp2vas {
-	struct {
-		u32 vas : 5;
-		u32 rsvd0 : 27;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_PP2LDBPOOL(x) \
-	(0x8000118 + (x) * 0x1000)
-#define DLB_SYS_LDB_PP2LDBPOOL_RST 0x0
-union dlb_sys_ldb_pp2ldbpool {
-	struct {
-		u32 ldbpool : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_PP2DIRPOOL(x) \
-	(0x8000114 + (x) * 0x1000)
-#define DLB_SYS_LDB_PP2DIRPOOL_RST 0x0
-union dlb_sys_ldb_pp2dirpool {
-	struct {
-		u32 dirpool : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_PP2VF_PF(x) \
-	(0x8000110 + (x) * 0x1000)
-#define DLB_SYS_LDB_PP2VF_PF_RST 0x0
-union dlb_sys_ldb_pp2vf_pf {
-	struct {
-		u32 vf : 4;
-		u32 is_pf : 1;
-		u32 rsvd0 : 27;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_PP_ADDR_U(x) \
-	(0x800010c + (x) * 0x1000)
-#define DLB_SYS_LDB_PP_ADDR_U_RST 0x0
-union dlb_sys_ldb_pp_addr_u {
-	struct {
-		u32 addr_u : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_PP_ADDR_L(x) \
-	(0x8000108 + (x) * 0x1000)
-#define DLB_SYS_LDB_PP_ADDR_L_RST 0x0
-union dlb_sys_ldb_pp_addr_l {
-	struct {
-		u32 rsvd0 : 7;
-		u32 addr_l : 25;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_CQ_ADDR_U(x) \
-	(0x8000104 + (x) * 0x1000)
-#define DLB_SYS_LDB_CQ_ADDR_U_RST 0x0
-union dlb_sys_ldb_cq_addr_u {
-	struct {
-		u32 addr_u : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_CQ_ADDR_L(x) \
-	(0x8000100 + (x) * 0x1000)
-#define DLB_SYS_LDB_CQ_ADDR_L_RST 0x0
-union dlb_sys_ldb_cq_addr_l {
-	struct {
-		u32 rsvd0 : 6;
-		u32 addr_l : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_PP_V(x) \
-	(0x8000228 + (x) * 0x1000)
-#define DLB_SYS_DIR_PP_V_RST 0x0
-union dlb_sys_dir_pp_v {
-	struct {
-		u32 pp_v : 1;
-		u32 mb_dm : 1;
-		u32 rsvd0 : 30;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_CQ_ISR(x) \
-	(0x8000224 + (x) * 0x1000)
-#define DLB_SYS_DIR_CQ_ISR_RST 0x0
-union dlb_sys_dir_cq_isr {
-	struct {
-		u32 vector : 6;
-		u32 vf : 4;
-		u32 en_code : 2;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_CQ2VF_PF(x) \
-	(0x8000220 + (x) * 0x1000)
-#define DLB_SYS_DIR_CQ2VF_PF_RST 0x0
-union dlb_sys_dir_cq2vf_pf {
-	struct {
-		u32 vf : 4;
-		u32 is_pf : 1;
-		u32 rsvd0 : 27;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_PP2VAS(x) \
-	(0x800021c + (x) * 0x1000)
-#define DLB_SYS_DIR_PP2VAS_RST 0x0
-union dlb_sys_dir_pp2vas {
-	struct {
-		u32 vas : 5;
-		u32 rsvd0 : 27;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_PP2LDBPOOL(x) \
-	(0x8000218 + (x) * 0x1000)
-#define DLB_SYS_DIR_PP2LDBPOOL_RST 0x0
-union dlb_sys_dir_pp2ldbpool {
-	struct {
-		u32 ldbpool : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_PP2DIRPOOL(x) \
-	(0x8000214 + (x) * 0x1000)
-#define DLB_SYS_DIR_PP2DIRPOOL_RST 0x0
-union dlb_sys_dir_pp2dirpool {
-	struct {
-		u32 dirpool : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_PP2VF_PF(x) \
-	(0x8000210 + (x) * 0x1000)
-#define DLB_SYS_DIR_PP2VF_PF_RST 0x0
-union dlb_sys_dir_pp2vf_pf {
-	struct {
-		u32 vf : 4;
-		u32 is_pf : 1;
-		u32 is_hw_dsi : 1;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_PP_ADDR_U(x) \
-	(0x800020c + (x) * 0x1000)
-#define DLB_SYS_DIR_PP_ADDR_U_RST 0x0
-union dlb_sys_dir_pp_addr_u {
-	struct {
-		u32 addr_u : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_PP_ADDR_L(x) \
-	(0x8000208 + (x) * 0x1000)
-#define DLB_SYS_DIR_PP_ADDR_L_RST 0x0
-union dlb_sys_dir_pp_addr_l {
-	struct {
-		u32 rsvd0 : 7;
-		u32 addr_l : 25;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_CQ_ADDR_U(x) \
-	(0x8000204 + (x) * 0x1000)
-#define DLB_SYS_DIR_CQ_ADDR_U_RST 0x0
-union dlb_sys_dir_cq_addr_u {
-	struct {
-		u32 addr_u : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_CQ_ADDR_L(x) \
-	(0x8000200 + (x) * 0x1000)
-#define DLB_SYS_DIR_CQ_ADDR_L_RST 0x0
-union dlb_sys_dir_cq_addr_l {
-	struct {
-		u32 rsvd0 : 6;
-		u32 addr_l : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_INGRESS_ALARM_ENBL 0x300
-#define DLB_SYS_INGRESS_ALARM_ENBL_RST 0x0
-union dlb_sys_ingress_alarm_enbl {
-	struct {
-		u32 illegal_hcw : 1;
-		u32 illegal_pp : 1;
-		u32 disabled_pp : 1;
-		u32 illegal_qid : 1;
-		u32 disabled_qid : 1;
-		u32 illegal_ldb_qid_cfg : 1;
-		u32 illegal_cqid : 1;
-		u32 rsvd0 : 25;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_CQ_MODE 0x30c
-#define DLB_SYS_CQ_MODE_RST 0x0
-union dlb_sys_cq_mode {
-	struct {
-		u32 ldb_cq64 : 1;
-		u32 dir_cq64 : 1;
-		u32 rsvd0 : 30;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_MSIX_ACK 0x400
-#define DLB_SYS_MSIX_ACK_RST 0x0
-union dlb_sys_msix_ack {
-	struct {
-		u32 msix_0_ack : 1;
-		u32 msix_1_ack : 1;
-		u32 msix_2_ack : 1;
-		u32 msix_3_ack : 1;
-		u32 msix_4_ack : 1;
-		u32 msix_5_ack : 1;
-		u32 msix_6_ack : 1;
-		u32 msix_7_ack : 1;
-		u32 msix_8_ack : 1;
-		u32 rsvd0 : 23;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_MSIX_PASSTHRU 0x404
-#define DLB_SYS_MSIX_PASSTHRU_RST 0x0
-union dlb_sys_msix_passthru {
-	struct {
-		u32 msix_0_passthru : 1;
-		u32 msix_1_passthru : 1;
-		u32 msix_2_passthru : 1;
-		u32 msix_3_passthru : 1;
-		u32 msix_4_passthru : 1;
-		u32 msix_5_passthru : 1;
-		u32 msix_6_passthru : 1;
-		u32 msix_7_passthru : 1;
-		u32 msix_8_passthru : 1;
-		u32 rsvd0 : 23;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_MSIX_MODE 0x408
-#define DLB_SYS_MSIX_MODE_RST 0x0
-/* MSI-X Modes */
-#define DLB_MSIX_MODE_PACKED     0
-#define DLB_MSIX_MODE_COMPRESSED 1
-union dlb_sys_msix_mode {
-	struct {
-		u32 mode : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_CQ_31_0_OCC_INT_STS 0x440
-#define DLB_SYS_DIR_CQ_31_0_OCC_INT_STS_RST 0x0
-union dlb_sys_dir_cq_31_0_occ_int_sts {
-	struct {
-		u32 cq_0_occ_int : 1;
-		u32 cq_1_occ_int : 1;
-		u32 cq_2_occ_int : 1;
-		u32 cq_3_occ_int : 1;
-		u32 cq_4_occ_int : 1;
-		u32 cq_5_occ_int : 1;
-		u32 cq_6_occ_int : 1;
-		u32 cq_7_occ_int : 1;
-		u32 cq_8_occ_int : 1;
-		u32 cq_9_occ_int : 1;
-		u32 cq_10_occ_int : 1;
-		u32 cq_11_occ_int : 1;
-		u32 cq_12_occ_int : 1;
-		u32 cq_13_occ_int : 1;
-		u32 cq_14_occ_int : 1;
-		u32 cq_15_occ_int : 1;
-		u32 cq_16_occ_int : 1;
-		u32 cq_17_occ_int : 1;
-		u32 cq_18_occ_int : 1;
-		u32 cq_19_occ_int : 1;
-		u32 cq_20_occ_int : 1;
-		u32 cq_21_occ_int : 1;
-		u32 cq_22_occ_int : 1;
-		u32 cq_23_occ_int : 1;
-		u32 cq_24_occ_int : 1;
-		u32 cq_25_occ_int : 1;
-		u32 cq_26_occ_int : 1;
-		u32 cq_27_occ_int : 1;
-		u32 cq_28_occ_int : 1;
-		u32 cq_29_occ_int : 1;
-		u32 cq_30_occ_int : 1;
-		u32 cq_31_occ_int : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_CQ_63_32_OCC_INT_STS 0x444
-#define DLB_SYS_DIR_CQ_63_32_OCC_INT_STS_RST 0x0
-union dlb_sys_dir_cq_63_32_occ_int_sts {
-	struct {
-		u32 cq_32_occ_int : 1;
-		u32 cq_33_occ_int : 1;
-		u32 cq_34_occ_int : 1;
-		u32 cq_35_occ_int : 1;
-		u32 cq_36_occ_int : 1;
-		u32 cq_37_occ_int : 1;
-		u32 cq_38_occ_int : 1;
-		u32 cq_39_occ_int : 1;
-		u32 cq_40_occ_int : 1;
-		u32 cq_41_occ_int : 1;
-		u32 cq_42_occ_int : 1;
-		u32 cq_43_occ_int : 1;
-		u32 cq_44_occ_int : 1;
-		u32 cq_45_occ_int : 1;
-		u32 cq_46_occ_int : 1;
-		u32 cq_47_occ_int : 1;
-		u32 cq_48_occ_int : 1;
-		u32 cq_49_occ_int : 1;
-		u32 cq_50_occ_int : 1;
-		u32 cq_51_occ_int : 1;
-		u32 cq_52_occ_int : 1;
-		u32 cq_53_occ_int : 1;
-		u32 cq_54_occ_int : 1;
-		u32 cq_55_occ_int : 1;
-		u32 cq_56_occ_int : 1;
-		u32 cq_57_occ_int : 1;
-		u32 cq_58_occ_int : 1;
-		u32 cq_59_occ_int : 1;
-		u32 cq_60_occ_int : 1;
-		u32 cq_61_occ_int : 1;
-		u32 cq_62_occ_int : 1;
-		u32 cq_63_occ_int : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_CQ_95_64_OCC_INT_STS 0x448
-#define DLB_SYS_DIR_CQ_95_64_OCC_INT_STS_RST 0x0
-union dlb_sys_dir_cq_95_64_occ_int_sts {
-	struct {
-		u32 cq_64_occ_int : 1;
-		u32 cq_65_occ_int : 1;
-		u32 cq_66_occ_int : 1;
-		u32 cq_67_occ_int : 1;
-		u32 cq_68_occ_int : 1;
-		u32 cq_69_occ_int : 1;
-		u32 cq_70_occ_int : 1;
-		u32 cq_71_occ_int : 1;
-		u32 cq_72_occ_int : 1;
-		u32 cq_73_occ_int : 1;
-		u32 cq_74_occ_int : 1;
-		u32 cq_75_occ_int : 1;
-		u32 cq_76_occ_int : 1;
-		u32 cq_77_occ_int : 1;
-		u32 cq_78_occ_int : 1;
-		u32 cq_79_occ_int : 1;
-		u32 cq_80_occ_int : 1;
-		u32 cq_81_occ_int : 1;
-		u32 cq_82_occ_int : 1;
-		u32 cq_83_occ_int : 1;
-		u32 cq_84_occ_int : 1;
-		u32 cq_85_occ_int : 1;
-		u32 cq_86_occ_int : 1;
-		u32 cq_87_occ_int : 1;
-		u32 cq_88_occ_int : 1;
-		u32 cq_89_occ_int : 1;
-		u32 cq_90_occ_int : 1;
-		u32 cq_91_occ_int : 1;
-		u32 cq_92_occ_int : 1;
-		u32 cq_93_occ_int : 1;
-		u32 cq_94_occ_int : 1;
-		u32 cq_95_occ_int : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_DIR_CQ_127_96_OCC_INT_STS 0x44c
-#define DLB_SYS_DIR_CQ_127_96_OCC_INT_STS_RST 0x0
-union dlb_sys_dir_cq_127_96_occ_int_sts {
-	struct {
-		u32 cq_96_occ_int : 1;
-		u32 cq_97_occ_int : 1;
-		u32 cq_98_occ_int : 1;
-		u32 cq_99_occ_int : 1;
-		u32 cq_100_occ_int : 1;
-		u32 cq_101_occ_int : 1;
-		u32 cq_102_occ_int : 1;
-		u32 cq_103_occ_int : 1;
-		u32 cq_104_occ_int : 1;
-		u32 cq_105_occ_int : 1;
-		u32 cq_106_occ_int : 1;
-		u32 cq_107_occ_int : 1;
-		u32 cq_108_occ_int : 1;
-		u32 cq_109_occ_int : 1;
-		u32 cq_110_occ_int : 1;
-		u32 cq_111_occ_int : 1;
-		u32 cq_112_occ_int : 1;
-		u32 cq_113_occ_int : 1;
-		u32 cq_114_occ_int : 1;
-		u32 cq_115_occ_int : 1;
-		u32 cq_116_occ_int : 1;
-		u32 cq_117_occ_int : 1;
-		u32 cq_118_occ_int : 1;
-		u32 cq_119_occ_int : 1;
-		u32 cq_120_occ_int : 1;
-		u32 cq_121_occ_int : 1;
-		u32 cq_122_occ_int : 1;
-		u32 cq_123_occ_int : 1;
-		u32 cq_124_occ_int : 1;
-		u32 cq_125_occ_int : 1;
-		u32 cq_126_occ_int : 1;
-		u32 cq_127_occ_int : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_CQ_31_0_OCC_INT_STS 0x460
-#define DLB_SYS_LDB_CQ_31_0_OCC_INT_STS_RST 0x0
-union dlb_sys_ldb_cq_31_0_occ_int_sts {
-	struct {
-		u32 cq_0_occ_int : 1;
-		u32 cq_1_occ_int : 1;
-		u32 cq_2_occ_int : 1;
-		u32 cq_3_occ_int : 1;
-		u32 cq_4_occ_int : 1;
-		u32 cq_5_occ_int : 1;
-		u32 cq_6_occ_int : 1;
-		u32 cq_7_occ_int : 1;
-		u32 cq_8_occ_int : 1;
-		u32 cq_9_occ_int : 1;
-		u32 cq_10_occ_int : 1;
-		u32 cq_11_occ_int : 1;
-		u32 cq_12_occ_int : 1;
-		u32 cq_13_occ_int : 1;
-		u32 cq_14_occ_int : 1;
-		u32 cq_15_occ_int : 1;
-		u32 cq_16_occ_int : 1;
-		u32 cq_17_occ_int : 1;
-		u32 cq_18_occ_int : 1;
-		u32 cq_19_occ_int : 1;
-		u32 cq_20_occ_int : 1;
-		u32 cq_21_occ_int : 1;
-		u32 cq_22_occ_int : 1;
-		u32 cq_23_occ_int : 1;
-		u32 cq_24_occ_int : 1;
-		u32 cq_25_occ_int : 1;
-		u32 cq_26_occ_int : 1;
-		u32 cq_27_occ_int : 1;
-		u32 cq_28_occ_int : 1;
-		u32 cq_29_occ_int : 1;
-		u32 cq_30_occ_int : 1;
-		u32 cq_31_occ_int : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_LDB_CQ_63_32_OCC_INT_STS 0x464
-#define DLB_SYS_LDB_CQ_63_32_OCC_INT_STS_RST 0x0
-union dlb_sys_ldb_cq_63_32_occ_int_sts {
-	struct {
-		u32 cq_32_occ_int : 1;
-		u32 cq_33_occ_int : 1;
-		u32 cq_34_occ_int : 1;
-		u32 cq_35_occ_int : 1;
-		u32 cq_36_occ_int : 1;
-		u32 cq_37_occ_int : 1;
-		u32 cq_38_occ_int : 1;
-		u32 cq_39_occ_int : 1;
-		u32 cq_40_occ_int : 1;
-		u32 cq_41_occ_int : 1;
-		u32 cq_42_occ_int : 1;
-		u32 cq_43_occ_int : 1;
-		u32 cq_44_occ_int : 1;
-		u32 cq_45_occ_int : 1;
-		u32 cq_46_occ_int : 1;
-		u32 cq_47_occ_int : 1;
-		u32 cq_48_occ_int : 1;
-		u32 cq_49_occ_int : 1;
-		u32 cq_50_occ_int : 1;
-		u32 cq_51_occ_int : 1;
-		u32 cq_52_occ_int : 1;
-		u32 cq_53_occ_int : 1;
-		u32 cq_54_occ_int : 1;
-		u32 cq_55_occ_int : 1;
-		u32 cq_56_occ_int : 1;
-		u32 cq_57_occ_int : 1;
-		u32 cq_58_occ_int : 1;
-		u32 cq_59_occ_int : 1;
-		u32 cq_60_occ_int : 1;
-		u32 cq_61_occ_int : 1;
-		u32 cq_62_occ_int : 1;
-		u32 cq_63_occ_int : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_ALARM_HW_SYND 0x50c
-#define DLB_SYS_ALARM_HW_SYND_RST 0x0
-union dlb_sys_alarm_hw_synd {
-	struct {
-		u32 syndrome : 8;
-		u32 rtype : 2;
-		u32 rsvd0 : 2;
-		u32 from_dmv : 1;
-		u32 is_ldb : 1;
-		u32 cls : 2;
-		u32 aid : 6;
-		u32 unit : 4;
-		u32 source : 4;
-		u32 more : 1;
-		u32 valid : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_SYS_SYS_ALARM_INT_ENABLE 0xc001048
-#define DLB_SYS_SYS_ALARM_INT_ENABLE_RST 0x7fffff
-union dlb_sys_sys_alarm_int_enable {
-	struct {
-		u32 cq_addr_overflow_error : 1;
-		u32 ingress_perr : 1;
-		u32 egress_perr : 1;
-		u32 alarm_perr : 1;
-		u32 vf_to_pf_isr_pend_error : 1;
-		u32 pf_to_vf_isr_pend_error : 1;
-		u32 timeout_error : 1;
-		u32 dmvw_sm_error : 1;
-		u32 pptr_sm_par_error : 1;
-		u32 pptr_sm_len_error : 1;
-		u32 sch_sm_error : 1;
-		u32 wbuf_flag_error : 1;
-		u32 dmvw_cl_error : 1;
-		u32 dmvr_cl_error : 1;
-		u32 cmpl_data_error : 1;
-		u32 cmpl_error : 1;
-		u32 fifo_underflow : 1;
-		u32 fifo_overflow : 1;
-		u32 sb_ep_parity_err : 1;
-		u32 ti_parity_err : 1;
-		u32 ri_parity_err : 1;
-		u32 cfgm_ppw_err : 1;
-		u32 system_csr_perr : 1;
-		u32 rsvd0 : 9;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(x) \
-	(0x20000000 + (x) * 0x1000)
-#define DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST 0x0
-union dlb_lsp_cq_ldb_tot_sch_cnt_ctrl {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_LDB_DSBL(x) \
-	(0x20000124 + (x) * 0x1000)
-#define DLB_LSP_CQ_LDB_DSBL_RST 0x1
-union dlb_lsp_cq_ldb_dsbl {
-	struct {
-		u32 disabled : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_LDB_TOT_SCH_CNTH(x) \
-	(0x20000120 + (x) * 0x1000)
-#define DLB_LSP_CQ_LDB_TOT_SCH_CNTH_RST 0x0
-union dlb_lsp_cq_ldb_tot_sch_cnth {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_LDB_TOT_SCH_CNTL(x) \
-	(0x2000011c + (x) * 0x1000)
-#define DLB_LSP_CQ_LDB_TOT_SCH_CNTL_RST 0x0
-union dlb_lsp_cq_ldb_tot_sch_cntl {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(x) \
-	(0x20000118 + (x) * 0x1000)
-#define DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST 0x0
-union dlb_lsp_cq_ldb_tkn_depth_sel {
-	struct {
-		u32 token_depth_select : 4;
-		u32 ignore_depth : 1;
-		u32 enab_shallow_cq : 1;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_LDB_TKN_CNT(x) \
-	(0x20000114 + (x) * 0x1000)
-#define DLB_LSP_CQ_LDB_TKN_CNT_RST 0x0
-union dlb_lsp_cq_ldb_tkn_cnt {
-	struct {
-		u32 token_count : 11;
-		u32 rsvd0 : 21;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_LDB_INFL_LIM(x) \
-	(0x20000110 + (x) * 0x1000)
-#define DLB_LSP_CQ_LDB_INFL_LIM_RST 0x0
-union dlb_lsp_cq_ldb_infl_lim {
-	struct {
-		u32 limit : 13;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_LDB_INFL_CNT(x) \
-	(0x2000010c + (x) * 0x1000)
-#define DLB_LSP_CQ_LDB_INFL_CNT_RST 0x0
-union dlb_lsp_cq_ldb_infl_cnt {
-	struct {
-		u32 count : 13;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ2QID(x, y) \
-	(0x20000104 + (x) * 0x1000 + (y) * 0x4)
-#define DLB_LSP_CQ2QID_RST 0x0
-union dlb_lsp_cq2qid {
-	struct {
-		u32 qid_p0 : 7;
-		u32 rsvd3 : 1;
-		u32 qid_p1 : 7;
-		u32 rsvd2 : 1;
-		u32 qid_p2 : 7;
-		u32 rsvd1 : 1;
-		u32 qid_p3 : 7;
-		u32 rsvd0 : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ2PRIOV(x) \
-	(0x20000100 + (x) * 0x1000)
-#define DLB_LSP_CQ2PRIOV_RST 0x0
-union dlb_lsp_cq2priov {
-	struct {
-		u32 prio : 24;
-		u32 v : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_DIR_DSBL(x) \
-	(0x20000310 + (x) * 0x1000)
-#define DLB_LSP_CQ_DIR_DSBL_RST 0x1
-union dlb_lsp_cq_dir_dsbl {
-	struct {
-		u32 disabled : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(x) \
-	(0x2000030c + (x) * 0x1000)
-#define DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST 0x0
-union dlb_lsp_cq_dir_tkn_depth_sel_dsi {
-	struct {
-		u32 token_depth_select : 4;
-		u32 disable_wb_opt : 1;
-		u32 ignore_depth : 1;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_DIR_TOT_SCH_CNTH(x) \
-	(0x20000308 + (x) * 0x1000)
-#define DLB_LSP_CQ_DIR_TOT_SCH_CNTH_RST 0x0
-union dlb_lsp_cq_dir_tot_sch_cnth {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_DIR_TOT_SCH_CNTL(x) \
-	(0x20000304 + (x) * 0x1000)
-#define DLB_LSP_CQ_DIR_TOT_SCH_CNTL_RST 0x0
-union dlb_lsp_cq_dir_tot_sch_cntl {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CQ_DIR_TKN_CNT(x) \
-	(0x20000300 + (x) * 0x1000)
-#define DLB_LSP_CQ_DIR_TKN_CNT_RST 0x0
-union dlb_lsp_cq_dir_tkn_cnt {
-	struct {
-		u32 count : 11;
-		u32 rsvd0 : 21;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_LDB_QID2CQIDX(x, y) \
-	(0x20000400 + (x) * 0x1000 + (y) * 0x4)
-#define DLB_LSP_QID_LDB_QID2CQIDX_RST 0x0
-union dlb_lsp_qid_ldb_qid2cqidx {
-	struct {
-		u32 cq_p0 : 8;
-		u32 cq_p1 : 8;
-		u32 cq_p2 : 8;
-		u32 cq_p3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_LDB_QID2CQIDX2(x, y) \
-	(0x20000500 + (x) * 0x1000 + (y) * 0x4)
-#define DLB_LSP_QID_LDB_QID2CQIDX2_RST 0x0
-union dlb_lsp_qid_ldb_qid2cqidx2 {
-	struct {
-		u32 cq_p0 : 8;
-		u32 cq_p1 : 8;
-		u32 cq_p2 : 8;
-		u32 cq_p3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_ATQ_ENQUEUE_CNT(x) \
-	(0x2000066c + (x) * 0x1000)
-#define DLB_LSP_QID_ATQ_ENQUEUE_CNT_RST 0x0
-union dlb_lsp_qid_atq_enqueue_cnt {
-	struct {
-		u32 count : 15;
-		u32 rsvd0 : 17;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_LDB_INFL_LIM(x) \
-	(0x2000064c + (x) * 0x1000)
-#define DLB_LSP_QID_LDB_INFL_LIM_RST 0x0
-union dlb_lsp_qid_ldb_infl_lim {
-	struct {
-		u32 limit : 13;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_LDB_INFL_CNT(x) \
-	(0x2000062c + (x) * 0x1000)
-#define DLB_LSP_QID_LDB_INFL_CNT_RST 0x0
-union dlb_lsp_qid_ldb_infl_cnt {
-	struct {
-		u32 count : 13;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_AQED_ACTIVE_LIM(x) \
-	(0x20000628 + (x) * 0x1000)
-#define DLB_LSP_QID_AQED_ACTIVE_LIM_RST 0x0
-union dlb_lsp_qid_aqed_active_lim {
-	struct {
-		u32 limit : 12;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_AQED_ACTIVE_CNT(x) \
-	(0x20000624 + (x) * 0x1000)
-#define DLB_LSP_QID_AQED_ACTIVE_CNT_RST 0x0
-union dlb_lsp_qid_aqed_active_cnt {
-	struct {
-		u32 count : 12;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_LDB_ENQUEUE_CNT(x) \
-	(0x20000604 + (x) * 0x1000)
-#define DLB_LSP_QID_LDB_ENQUEUE_CNT_RST 0x0
-union dlb_lsp_qid_ldb_enqueue_cnt {
-	struct {
-		u32 count : 15;
-		u32 rsvd0 : 17;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_LDB_REPLAY_CNT(x) \
-	(0x20000600 + (x) * 0x1000)
-#define DLB_LSP_QID_LDB_REPLAY_CNT_RST 0x0
-union dlb_lsp_qid_ldb_replay_cnt {
-	struct {
-		u32 count : 15;
-		u32 rsvd0 : 17;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_QID_DIR_ENQUEUE_CNT(x) \
-	(0x20000700 + (x) * 0x1000)
-#define DLB_LSP_QID_DIR_ENQUEUE_CNT_RST 0x0
-union dlb_lsp_qid_dir_enqueue_cnt {
-	struct {
-		u32 count : 13;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CTRL_CONFIG_0 0x2800002c
-#define DLB_LSP_CTRL_CONFIG_0_RST 0x12cc
-union dlb_lsp_ctrl_config_0 {
-	struct {
-		u32 atm_cq_qid_priority_prot : 1;
-		u32 ldb_arb_ignore_empty : 1;
-		u32 ldb_arb_mode : 2;
-		u32 ldb_arb_threshold : 18;
-		u32 cfg_cq_sla_upd_always : 1;
-		u32 cfg_cq_wcn_upd_always : 1;
-		u32 spare : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_1 0x28000028
-#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_1_RST 0x0
-union dlb_lsp_cfg_arb_weight_atm_nalb_qid_1 {
-	struct {
-		u32 slot4_weight : 8;
-		u32 slot5_weight : 8;
-		u32 slot6_weight : 8;
-		u32 slot7_weight : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0 0x28000024
-#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_RST 0x0
-union dlb_lsp_cfg_arb_weight_atm_nalb_qid_0 {
-	struct {
-		u32 slot0_weight : 8;
-		u32 slot1_weight : 8;
-		u32 slot2_weight : 8;
-		u32 slot3_weight : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_1 0x28000020
-#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_1_RST 0x0
-union dlb_lsp_cfg_arb_weight_ldb_qid_1 {
-	struct {
-		u32 slot4_weight : 8;
-		u32 slot5_weight : 8;
-		u32 slot6_weight : 8;
-		u32 slot7_weight : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_0 0x2800001c
-#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_0_RST 0x0
-union dlb_lsp_cfg_arb_weight_ldb_qid_0 {
-	struct {
-		u32 slot0_weight : 8;
-		u32 slot1_weight : 8;
-		u32 slot2_weight : 8;
-		u32 slot3_weight : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_LDB_SCHED_CTRL 0x28100000
-#define DLB_LSP_LDB_SCHED_CTRL_RST 0x0
-union dlb_lsp_ldb_sched_ctrl {
-	struct {
-		u32 cq : 8;
-		u32 qidix : 3;
-		u32 value : 1;
-		u32 nalb_haswork_v : 1;
-		u32 rlist_haswork_v : 1;
-		u32 slist_haswork_v : 1;
-		u32 inflight_ok_v : 1;
-		u32 aqed_nfull_v : 1;
-		u32 spare0 : 15;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_DIR_SCH_CNT_H 0x2820000c
-#define DLB_LSP_DIR_SCH_CNT_H_RST 0x0
-union dlb_lsp_dir_sch_cnt_h {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_DIR_SCH_CNT_L 0x28200008
-#define DLB_LSP_DIR_SCH_CNT_L_RST 0x0
-union dlb_lsp_dir_sch_cnt_l {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_LDB_SCH_CNT_H 0x28200004
-#define DLB_LSP_LDB_SCH_CNT_H_RST 0x0
-union dlb_lsp_ldb_sch_cnt_h {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_LSP_LDB_SCH_CNT_L 0x28200000
-#define DLB_LSP_LDB_SCH_CNT_L_RST 0x0
-union dlb_lsp_ldb_sch_cnt_l {
-	struct {
-		u32 count : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_DP_DIR_CSR_CTRL 0x38000018
-#define DLB_DP_DIR_CSR_CTRL_RST 0xc0000000
-union dlb_dp_dir_csr_ctrl {
-	struct {
-		u32 cfg_int_dis : 1;
-		u32 cfg_int_dis_sbe : 1;
-		u32 cfg_int_dis_mbe : 1;
-		u32 spare0 : 27;
-		u32 cfg_vasr_dis : 1;
-		u32 cfg_int_dis_synd : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_1 0x38000014
-#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_1_RST 0xfffefdfc
-union dlb_dp_cfg_ctrl_arb_weights_tqpri_dir_1 {
-	struct {
-		u32 pri4 : 8;
-		u32 pri5 : 8;
-		u32 pri6 : 8;
-		u32 pri7 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_0 0x38000010
-#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_0_RST 0xfbfaf9f8
-union dlb_dp_cfg_ctrl_arb_weights_tqpri_dir_0 {
-	struct {
-		u32 pri0 : 8;
-		u32 pri1 : 8;
-		u32 pri2 : 8;
-		u32 pri3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1 0x3800000c
-#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1_RST 0xfffefdfc
-union dlb_dp_cfg_ctrl_arb_weights_tqpri_replay_1 {
-	struct {
-		u32 pri4 : 8;
-		u32 pri5 : 8;
-		u32 pri6 : 8;
-		u32 pri7 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0 0x38000008
-#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0_RST 0xfbfaf9f8
-union dlb_dp_cfg_ctrl_arb_weights_tqpri_replay_0 {
-	struct {
-		u32 pri0 : 8;
-		u32 pri1 : 8;
-		u32 pri2 : 8;
-		u32 pri3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_1 0x6800001c
-#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_1_RST 0xfffefdfc
-union dlb_nalb_pipe_ctrl_arb_weights_tqpri_nalb_1 {
-	struct {
-		u32 pri4 : 8;
-		u32 pri5 : 8;
-		u32 pri6 : 8;
-		u32 pri7 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_0 0x68000018
-#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_0_RST 0xfbfaf9f8
-union dlb_nalb_pipe_ctrl_arb_weights_tqpri_nalb_0 {
-	struct {
-		u32 pri0 : 8;
-		u32 pri1 : 8;
-		u32 pri2 : 8;
-		u32 pri3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_1 0x68000014
-#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_1_RST 0xfffefdfc
-union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_atq_1 {
-	struct {
-		u32 pri4 : 8;
-		u32 pri5 : 8;
-		u32 pri6 : 8;
-		u32 pri7 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_0 0x68000010
-#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_0_RST 0xfbfaf9f8
-union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_atq_0 {
-	struct {
-		u32 pri0 : 8;
-		u32 pri1 : 8;
-		u32 pri2 : 8;
-		u32 pri3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1 0x6800000c
-#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1_RST 0xfffefdfc
-union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_replay_1 {
-	struct {
-		u32 pri4 : 8;
-		u32 pri5 : 8;
-		u32 pri6 : 8;
-		u32 pri7 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0 0x68000008
-#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0_RST 0xfbfaf9f8
-union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_replay_0 {
-	struct {
-		u32 pri0 : 8;
-		u32 pri1 : 8;
-		u32 pri2 : 8;
-		u32 pri3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_ATM_PIPE_QID_LDB_QID2CQIDX(x, y) \
-	(0x70000000 + (x) * 0x1000 + (y) * 0x4)
-#define DLB_ATM_PIPE_QID_LDB_QID2CQIDX_RST 0x0
-union dlb_atm_pipe_qid_ldb_qid2cqidx {
-	struct {
-		u32 cq_p0 : 8;
-		u32 cq_p1 : 8;
-		u32 cq_p2 : 8;
-		u32 cq_p3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_ATM_PIPE_CFG_CTRL_ARB_WEIGHTS_SCHED_BIN 0x7800000c
-#define DLB_ATM_PIPE_CFG_CTRL_ARB_WEIGHTS_SCHED_BIN_RST 0xfffefdfc
-union dlb_atm_pipe_cfg_ctrl_arb_weights_sched_bin {
-	struct {
-		u32 bin0 : 8;
-		u32 bin1 : 8;
-		u32 bin2 : 8;
-		u32 bin3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_ATM_PIPE_CTRL_ARB_WEIGHTS_RDY_BIN 0x78000008
-#define DLB_ATM_PIPE_CTRL_ARB_WEIGHTS_RDY_BIN_RST 0xfffefdfc
-union dlb_atm_pipe_ctrl_arb_weights_rdy_bin {
-	struct {
-		u32 bin0 : 8;
-		u32 bin1 : 8;
-		u32 bin2 : 8;
-		u32 bin3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_AQED_PIPE_QID_FID_LIM(x) \
-	(0x80000014 + (x) * 0x1000)
-#define DLB_AQED_PIPE_QID_FID_LIM_RST 0x7ff
-union dlb_aqed_pipe_qid_fid_lim {
-	struct {
-		u32 qid_fid_limit : 13;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_AQED_PIPE_FL_POP_PTR(x) \
-	(0x80000010 + (x) * 0x1000)
-#define DLB_AQED_PIPE_FL_POP_PTR_RST 0x0
-union dlb_aqed_pipe_fl_pop_ptr {
-	struct {
-		u32 pop_ptr : 11;
-		u32 generation : 1;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_AQED_PIPE_FL_PUSH_PTR(x) \
-	(0x8000000c + (x) * 0x1000)
-#define DLB_AQED_PIPE_FL_PUSH_PTR_RST 0x0
-union dlb_aqed_pipe_fl_push_ptr {
-	struct {
-		u32 push_ptr : 11;
-		u32 generation : 1;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_AQED_PIPE_FL_BASE(x) \
-	(0x80000008 + (x) * 0x1000)
-#define DLB_AQED_PIPE_FL_BASE_RST 0x0
-union dlb_aqed_pipe_fl_base {
-	struct {
-		u32 base : 11;
-		u32 rsvd0 : 21;
-	} field;
-	u32 val;
-};
-
-#define DLB_AQED_PIPE_FL_LIM(x) \
-	(0x80000004 + (x) * 0x1000)
-#define DLB_AQED_PIPE_FL_LIM_RST 0x800
-union dlb_aqed_pipe_fl_lim {
-	struct {
-		u32 limit : 11;
-		u32 freelist_disable : 1;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_AQED_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATM_0 0x88000008
-#define DLB_AQED_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATM_0_RST 0xfffe
-union dlb_aqed_pipe_cfg_ctrl_arb_weights_tqpri_atm_0 {
-	struct {
-		u32 pri0 : 8;
-		u32 pri1 : 8;
-		u32 pri2 : 8;
-		u32 pri3 : 8;
-	} field;
-	u32 val;
-};
-
-#define DLB_RO_PIPE_QID2GRPSLT(x) \
-	(0x90000000 + (x) * 0x1000)
-#define DLB_RO_PIPE_QID2GRPSLT_RST 0x0
-union dlb_ro_pipe_qid2grpslt {
-	struct {
-		u32 slot : 5;
-		u32 rsvd1 : 3;
-		u32 group : 2;
-		u32 rsvd0 : 22;
-	} field;
-	u32 val;
-};
-
-#define DLB_RO_PIPE_GRP_SN_MODE 0x98000008
-#define DLB_RO_PIPE_GRP_SN_MODE_RST 0x0
-union dlb_ro_pipe_grp_sn_mode {
-	struct {
-		u32 sn_mode_0 : 3;
-		u32 reserved0 : 5;
-		u32 sn_mode_1 : 3;
-		u32 reserved1 : 5;
-		u32 sn_mode_2 : 3;
-		u32 reserved2 : 5;
-		u32 sn_mode_3 : 3;
-		u32 reserved3 : 5;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_CFG_DIR_PP_SW_ALARM_EN(x) \
-	(0xa000003c + (x) * 0x1000)
-#define DLB_CHP_CFG_DIR_PP_SW_ALARM_EN_RST 0x1
-union dlb_chp_cfg_dir_pp_sw_alarm_en {
-	struct {
-		u32 alarm_enable : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_WD_ENB(x) \
-	(0xa0000038 + (x) * 0x1000)
-#define DLB_CHP_DIR_CQ_WD_ENB_RST 0x0
-union dlb_chp_dir_cq_wd_enb {
-	struct {
-		u32 wd_enable : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_LDB_PP2POOL(x) \
-	(0xa0000034 + (x) * 0x1000)
-#define DLB_CHP_DIR_LDB_PP2POOL_RST 0x0
-union dlb_chp_dir_ldb_pp2pool {
-	struct {
-		u32 pool : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_DIR_PP2POOL(x) \
-	(0xa0000030 + (x) * 0x1000)
-#define DLB_CHP_DIR_DIR_PP2POOL_RST 0x0
-union dlb_chp_dir_dir_pp2pool {
-	struct {
-		u32 pool : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_LDB_CRD_CNT(x) \
-	(0xa000002c + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_LDB_CRD_CNT_RST 0x0
-union dlb_chp_dir_pp_ldb_crd_cnt {
-	struct {
-		u32 count : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_DIR_CRD_CNT(x) \
-	(0xa0000028 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_DIR_CRD_CNT_RST 0x0
-union dlb_chp_dir_pp_dir_crd_cnt {
-	struct {
-		u32 count : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_TMR_THRESHOLD(x) \
-	(0xa0000024 + (x) * 0x1000)
-#define DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST 0x0
-union dlb_chp_dir_cq_tmr_threshold {
-	struct {
-		u32 timer_thrsh : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_INT_ENB(x) \
-	(0xa0000020 + (x) * 0x1000)
-#define DLB_CHP_DIR_CQ_INT_ENB_RST 0x0
-union dlb_chp_dir_cq_int_enb {
-	struct {
-		u32 en_tim : 1;
-		u32 en_depth : 1;
-		u32 rsvd0 : 30;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(x) \
-	(0xa000001c + (x) * 0x1000)
-#define DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST 0x0
-union dlb_chp_dir_cq_int_depth_thrsh {
-	struct {
-		u32 depth_threshold : 12;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(x) \
-	(0xa0000018 + (x) * 0x1000)
-#define DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST 0x0
-union dlb_chp_dir_cq_tkn_depth_sel {
-	struct {
-		u32 token_depth_select : 4;
-		u32 rsvd0 : 28;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(x) \
-	(0xa0000014 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST 0x1
-union dlb_chp_dir_pp_ldb_min_crd_qnt {
-	struct {
-		u32 quanta : 10;
-		u32 rsvd0 : 22;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(x) \
-	(0xa0000010 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST 0x1
-union dlb_chp_dir_pp_dir_min_crd_qnt {
-	struct {
-		u32 quanta : 10;
-		u32 rsvd0 : 22;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_LDB_CRD_LWM(x) \
-	(0xa000000c + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_LDB_CRD_LWM_RST 0x0
-union dlb_chp_dir_pp_ldb_crd_lwm {
-	struct {
-		u32 lwm : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_LDB_CRD_HWM(x) \
-	(0xa0000008 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_LDB_CRD_HWM_RST 0x0
-union dlb_chp_dir_pp_ldb_crd_hwm {
-	struct {
-		u32 hwm : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_DIR_CRD_LWM(x) \
-	(0xa0000004 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_DIR_CRD_LWM_RST 0x0
-union dlb_chp_dir_pp_dir_crd_lwm {
-	struct {
-		u32 lwm : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_DIR_CRD_HWM(x) \
-	(0xa0000000 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_DIR_CRD_HWM_RST 0x0
-union dlb_chp_dir_pp_dir_crd_hwm {
-	struct {
-		u32 hwm : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_CFG_LDB_PP_SW_ALARM_EN(x) \
-	(0xa0000148 + (x) * 0x1000)
-#define DLB_CHP_CFG_LDB_PP_SW_ALARM_EN_RST 0x1
-union dlb_chp_cfg_ldb_pp_sw_alarm_en {
-	struct {
-		u32 alarm_enable : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_WD_ENB(x) \
-	(0xa0000144 + (x) * 0x1000)
-#define DLB_CHP_LDB_CQ_WD_ENB_RST 0x0
-union dlb_chp_ldb_cq_wd_enb {
-	struct {
-		u32 wd_enable : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_SN_CHK_ENBL(x) \
-	(0xa0000140 + (x) * 0x1000)
-#define DLB_CHP_SN_CHK_ENBL_RST 0x0
-union dlb_chp_sn_chk_enbl {
-	struct {
-		u32 en : 1;
-		u32 rsvd0 : 31;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_HIST_LIST_BASE(x) \
-	(0xa000013c + (x) * 0x1000)
-#define DLB_CHP_HIST_LIST_BASE_RST 0x0
-union dlb_chp_hist_list_base {
-	struct {
-		u32 base : 13;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_HIST_LIST_LIM(x) \
-	(0xa0000138 + (x) * 0x1000)
-#define DLB_CHP_HIST_LIST_LIM_RST 0x0
-union dlb_chp_hist_list_lim {
-	struct {
-		u32 limit : 13;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_LDB_PP2POOL(x) \
-	(0xa0000134 + (x) * 0x1000)
-#define DLB_CHP_LDB_LDB_PP2POOL_RST 0x0
-union dlb_chp_ldb_ldb_pp2pool {
-	struct {
-		u32 pool : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_DIR_PP2POOL(x) \
-	(0xa0000130 + (x) * 0x1000)
-#define DLB_CHP_LDB_DIR_PP2POOL_RST 0x0
-union dlb_chp_ldb_dir_pp2pool {
-	struct {
-		u32 pool : 6;
-		u32 rsvd0 : 26;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_LDB_CRD_CNT(x) \
-	(0xa000012c + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_LDB_CRD_CNT_RST 0x0
-union dlb_chp_ldb_pp_ldb_crd_cnt {
-	struct {
-		u32 count : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_DIR_CRD_CNT(x) \
-	(0xa0000128 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_DIR_CRD_CNT_RST 0x0
-union dlb_chp_ldb_pp_dir_crd_cnt {
-	struct {
-		u32 count : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_TMR_THRESHOLD(x) \
-	(0xa0000124 + (x) * 0x1000)
-#define DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST 0x0
-union dlb_chp_ldb_cq_tmr_threshold {
-	struct {
-		u32 thrsh : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_INT_ENB(x) \
-	(0xa0000120 + (x) * 0x1000)
-#define DLB_CHP_LDB_CQ_INT_ENB_RST 0x0
-union dlb_chp_ldb_cq_int_enb {
-	struct {
-		u32 en_tim : 1;
-		u32 en_depth : 1;
-		u32 rsvd0 : 30;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(x) \
-	(0xa000011c + (x) * 0x1000)
-#define DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST 0x0
-union dlb_chp_ldb_cq_int_depth_thrsh {
-	struct {
-		u32 depth_threshold : 12;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(x) \
-	(0xa0000118 + (x) * 0x1000)
-#define DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST 0x0
-union dlb_chp_ldb_cq_tkn_depth_sel {
-	struct {
-		u32 token_depth_select : 4;
-		u32 rsvd0 : 28;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(x) \
-	(0xa0000114 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST 0x1
-union dlb_chp_ldb_pp_ldb_min_crd_qnt {
-	struct {
-		u32 quanta : 10;
-		u32 rsvd0 : 22;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(x) \
-	(0xa0000110 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST 0x1
-union dlb_chp_ldb_pp_dir_min_crd_qnt {
-	struct {
-		u32 quanta : 10;
-		u32 rsvd0 : 22;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_LDB_CRD_LWM(x) \
-	(0xa000010c + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_LDB_CRD_LWM_RST 0x0
-union dlb_chp_ldb_pp_ldb_crd_lwm {
-	struct {
-		u32 lwm : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_LDB_CRD_HWM(x) \
-	(0xa0000108 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_LDB_CRD_HWM_RST 0x0
-union dlb_chp_ldb_pp_ldb_crd_hwm {
-	struct {
-		u32 hwm : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_DIR_CRD_LWM(x) \
-	(0xa0000104 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_DIR_CRD_LWM_RST 0x0
-union dlb_chp_ldb_pp_dir_crd_lwm {
-	struct {
-		u32 lwm : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_DIR_CRD_HWM(x) \
-	(0xa0000100 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_DIR_CRD_HWM_RST 0x0
-union dlb_chp_ldb_pp_dir_crd_hwm {
-	struct {
-		u32 hwm : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_DEPTH(x) \
-	(0xa0000218 + (x) * 0x1000)
-#define DLB_CHP_DIR_CQ_DEPTH_RST 0x0
-union dlb_chp_dir_cq_depth {
-	struct {
-		u32 cq_depth : 11;
-		u32 rsvd0 : 21;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_WPTR(x) \
-	(0xa0000214 + (x) * 0x1000)
-#define DLB_CHP_DIR_CQ_WPTR_RST 0x0
-union dlb_chp_dir_cq_wptr {
-	struct {
-		u32 write_pointer : 10;
-		u32 rsvd0 : 22;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_LDB_PUSH_PTR(x) \
-	(0xa0000210 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST 0x0
-union dlb_chp_dir_pp_ldb_push_ptr {
-	struct {
-		u32 push_pointer : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_DIR_PUSH_PTR(x) \
-	(0xa000020c + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST 0x0
-union dlb_chp_dir_pp_dir_push_ptr {
-	struct {
-		u32 push_pointer : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_STATE_RESET(x) \
-	(0xa0000204 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_STATE_RESET_RST 0x0
-union dlb_chp_dir_pp_state_reset {
-	struct {
-		u32 rsvd1 : 7;
-		u32 dir_type : 1;
-		u32 rsvd0 : 23;
-		u32 reset_pp_state : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_PP_CRD_REQ_STATE(x) \
-	(0xa0000200 + (x) * 0x1000)
-#define DLB_CHP_DIR_PP_CRD_REQ_STATE_RST 0x0
-union dlb_chp_dir_pp_crd_req_state {
-	struct {
-		u32 dir_crd_req_active_valid : 1;
-		u32 dir_crd_req_active_check : 1;
-		u32 dir_crd_req_active_busy : 1;
-		u32 rsvd1 : 1;
-		u32 ldb_crd_req_active_valid : 1;
-		u32 ldb_crd_req_active_check : 1;
-		u32 ldb_crd_req_active_busy : 1;
-		u32 rsvd0 : 1;
-		u32 no_pp_credit_update : 1;
-		u32 crd_req_state : 23;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_DEPTH(x) \
-	(0xa0000320 + (x) * 0x1000)
-#define DLB_CHP_LDB_CQ_DEPTH_RST 0x0
-union dlb_chp_ldb_cq_depth {
-	struct {
-		u32 depth : 11;
-		u32 reserved : 2;
-		u32 rsvd0 : 19;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_WPTR(x) \
-	(0xa000031c + (x) * 0x1000)
-#define DLB_CHP_LDB_CQ_WPTR_RST 0x0
-union dlb_chp_ldb_cq_wptr {
-	struct {
-		u32 write_pointer : 10;
-		u32 rsvd0 : 22;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_LDB_PUSH_PTR(x) \
-	(0xa0000318 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST 0x0
-union dlb_chp_ldb_pp_ldb_push_ptr {
-	struct {
-		u32 push_pointer : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_DIR_PUSH_PTR(x) \
-	(0xa0000314 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST 0x0
-union dlb_chp_ldb_pp_dir_push_ptr {
-	struct {
-		u32 push_pointer : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_HIST_LIST_POP_PTR(x) \
-	(0xa000030c + (x) * 0x1000)
-#define DLB_CHP_HIST_LIST_POP_PTR_RST 0x0
-union dlb_chp_hist_list_pop_ptr {
-	struct {
-		u32 pop_ptr : 13;
-		u32 generation : 1;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_HIST_LIST_PUSH_PTR(x) \
-	(0xa0000308 + (x) * 0x1000)
-#define DLB_CHP_HIST_LIST_PUSH_PTR_RST 0x0
-union dlb_chp_hist_list_push_ptr {
-	struct {
-		u32 push_ptr : 13;
-		u32 generation : 1;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_STATE_RESET(x) \
-	(0xa0000304 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_STATE_RESET_RST 0x0
-union dlb_chp_ldb_pp_state_reset {
-	struct {
-		u32 rsvd1 : 7;
-		u32 dir_type : 1;
-		u32 rsvd0 : 23;
-		u32 reset_pp_state : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_PP_CRD_REQ_STATE(x) \
-	(0xa0000300 + (x) * 0x1000)
-#define DLB_CHP_LDB_PP_CRD_REQ_STATE_RST 0x0
-union dlb_chp_ldb_pp_crd_req_state {
-	struct {
-		u32 dir_crd_req_active_valid : 1;
-		u32 dir_crd_req_active_check : 1;
-		u32 dir_crd_req_active_busy : 1;
-		u32 rsvd1 : 1;
-		u32 ldb_crd_req_active_valid : 1;
-		u32 ldb_crd_req_active_check : 1;
-		u32 ldb_crd_req_active_busy : 1;
-		u32 rsvd0 : 1;
-		u32 no_pp_credit_update : 1;
-		u32 crd_req_state : 23;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_ORD_QID_SN(x) \
-	(0xa0000408 + (x) * 0x1000)
-#define DLB_CHP_ORD_QID_SN_RST 0x0
-union dlb_chp_ord_qid_sn {
-	struct {
-		u32 sn : 12;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_ORD_QID_SN_MAP(x) \
-	(0xa0000404 + (x) * 0x1000)
-#define DLB_CHP_ORD_QID_SN_MAP_RST 0x0
-union dlb_chp_ord_qid_sn_map {
-	struct {
-		u32 mode : 3;
-		u32 slot : 5;
-		u32 grp : 2;
-		u32 rsvd0 : 22;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_POOL_CRD_CNT(x) \
-	(0xa000050c + (x) * 0x1000)
-#define DLB_CHP_LDB_POOL_CRD_CNT_RST 0x0
-union dlb_chp_ldb_pool_crd_cnt {
-	struct {
-		u32 count : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_QED_FL_BASE(x) \
-	(0xa0000508 + (x) * 0x1000)
-#define DLB_CHP_QED_FL_BASE_RST 0x0
-union dlb_chp_qed_fl_base {
-	struct {
-		u32 base : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_QED_FL_LIM(x) \
-	(0xa0000504 + (x) * 0x1000)
-#define DLB_CHP_QED_FL_LIM_RST 0x8000
-union dlb_chp_qed_fl_lim {
-	struct {
-		u32 limit : 14;
-		u32 rsvd1 : 1;
-		u32 freelist_disable : 1;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_POOL_CRD_LIM(x) \
-	(0xa0000500 + (x) * 0x1000)
-#define DLB_CHP_LDB_POOL_CRD_LIM_RST 0x0
-union dlb_chp_ldb_pool_crd_lim {
-	struct {
-		u32 limit : 16;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_QED_FL_POP_PTR(x) \
-	(0xa0000604 + (x) * 0x1000)
-#define DLB_CHP_QED_FL_POP_PTR_RST 0x0
-union dlb_chp_qed_fl_pop_ptr {
-	struct {
-		u32 pop_ptr : 14;
-		u32 reserved0 : 1;
-		u32 generation : 1;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_QED_FL_PUSH_PTR(x) \
-	(0xa0000600 + (x) * 0x1000)
-#define DLB_CHP_QED_FL_PUSH_PTR_RST 0x0
-union dlb_chp_qed_fl_push_ptr {
-	struct {
-		u32 push_ptr : 14;
-		u32 reserved0 : 1;
-		u32 generation : 1;
-		u32 rsvd0 : 16;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_POOL_CRD_CNT(x) \
-	(0xa000070c + (x) * 0x1000)
-#define DLB_CHP_DIR_POOL_CRD_CNT_RST 0x0
-union dlb_chp_dir_pool_crd_cnt {
-	struct {
-		u32 count : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DQED_FL_BASE(x) \
-	(0xa0000708 + (x) * 0x1000)
-#define DLB_CHP_DQED_FL_BASE_RST 0x0
-union dlb_chp_dqed_fl_base {
-	struct {
-		u32 base : 12;
-		u32 rsvd0 : 20;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DQED_FL_LIM(x) \
-	(0xa0000704 + (x) * 0x1000)
-#define DLB_CHP_DQED_FL_LIM_RST 0x2000
-union dlb_chp_dqed_fl_lim {
-	struct {
-		u32 limit : 12;
-		u32 rsvd1 : 1;
-		u32 freelist_disable : 1;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_POOL_CRD_LIM(x) \
-	(0xa0000700 + (x) * 0x1000)
-#define DLB_CHP_DIR_POOL_CRD_LIM_RST 0x0
-union dlb_chp_dir_pool_crd_lim {
-	struct {
-		u32 limit : 14;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DQED_FL_POP_PTR(x) \
-	(0xa0000804 + (x) * 0x1000)
-#define DLB_CHP_DQED_FL_POP_PTR_RST 0x0
-union dlb_chp_dqed_fl_pop_ptr {
-	struct {
-		u32 pop_ptr : 12;
-		u32 reserved0 : 1;
-		u32 generation : 1;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DQED_FL_PUSH_PTR(x) \
-	(0xa0000800 + (x) * 0x1000)
-#define DLB_CHP_DQED_FL_PUSH_PTR_RST 0x0
-union dlb_chp_dqed_fl_push_ptr {
-	struct {
-		u32 push_ptr : 12;
-		u32 reserved0 : 1;
-		u32 generation : 1;
-		u32 rsvd0 : 18;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_CTRL_DIAG_02 0xa8000154
-#define DLB_CHP_CTRL_DIAG_02_RST 0x0
-union dlb_chp_ctrl_diag_02 {
-	struct {
-		u32 control : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_CFG_CHP_CSR_CTRL 0xa8000130
-#define DLB_CHP_CFG_CHP_CSR_CTRL_RST 0xc0003fff
-#define DLB_CHP_CFG_EXCESS_TOKENS_SHIFT 12
-union dlb_chp_cfg_chp_csr_ctrl {
-	struct {
-		u32 int_inf_alarm_enable_0 : 1;
-		u32 int_inf_alarm_enable_1 : 1;
-		u32 int_inf_alarm_enable_2 : 1;
-		u32 int_inf_alarm_enable_3 : 1;
-		u32 int_inf_alarm_enable_4 : 1;
-		u32 int_inf_alarm_enable_5 : 1;
-		u32 int_inf_alarm_enable_6 : 1;
-		u32 int_inf_alarm_enable_7 : 1;
-		u32 int_inf_alarm_enable_8 : 1;
-		u32 int_inf_alarm_enable_9 : 1;
-		u32 int_inf_alarm_enable_10 : 1;
-		u32 int_inf_alarm_enable_11 : 1;
-		u32 int_inf_alarm_enable_12 : 1;
-		u32 int_cor_alarm_enable : 1;
-		u32 csr_control_spare : 14;
-		u32 cfg_vasr_dis : 1;
-		u32 counter_clear : 1;
-		u32 blk_cor_report : 1;
-		u32 blk_cor_synd : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_INTR_ARMED1 0xa8000068
-#define DLB_CHP_LDB_CQ_INTR_ARMED1_RST 0x0
-union dlb_chp_ldb_cq_intr_armed1 {
-	struct {
-		u32 armed : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_LDB_CQ_INTR_ARMED0 0xa8000064
-#define DLB_CHP_LDB_CQ_INTR_ARMED0_RST 0x0
-union dlb_chp_ldb_cq_intr_armed0 {
-	struct {
-		u32 armed : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_INTR_ARMED3 0xa8000024
-#define DLB_CHP_DIR_CQ_INTR_ARMED3_RST 0x0
-union dlb_chp_dir_cq_intr_armed3 {
-	struct {
-		u32 armed : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_INTR_ARMED2 0xa8000020
-#define DLB_CHP_DIR_CQ_INTR_ARMED2_RST 0x0
-union dlb_chp_dir_cq_intr_armed2 {
-	struct {
-		u32 armed : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_INTR_ARMED1 0xa800001c
-#define DLB_CHP_DIR_CQ_INTR_ARMED1_RST 0x0
-union dlb_chp_dir_cq_intr_armed1 {
-	struct {
-		u32 armed : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_CHP_DIR_CQ_INTR_ARMED0 0xa8000018
-#define DLB_CHP_DIR_CQ_INTR_ARMED0_RST 0x0
-union dlb_chp_dir_cq_intr_armed0 {
-	struct {
-		u32 armed : 32;
-	} field;
-	u32 val;
-};
-
-#define DLB_CFG_MSTR_DIAG_RESET_STS 0xb8000004
-#define DLB_CFG_MSTR_DIAG_RESET_STS_RST 0x1ff
-union dlb_cfg_mstr_diag_reset_sts {
-	struct {
-		u32 chp_pf_reset_done : 1;
-		u32 rop_pf_reset_done : 1;
-		u32 lsp_pf_reset_done : 1;
-		u32 nalb_pf_reset_done : 1;
-		u32 ap_pf_reset_done : 1;
-		u32 dp_pf_reset_done : 1;
-		u32 qed_pf_reset_done : 1;
-		u32 dqed_pf_reset_done : 1;
-		u32 aqed_pf_reset_done : 1;
-		u32 rsvd1 : 6;
-		u32 pf_reset_active : 1;
-		u32 chp_vf_reset_done : 1;
-		u32 rop_vf_reset_done : 1;
-		u32 lsp_vf_reset_done : 1;
-		u32 nalb_vf_reset_done : 1;
-		u32 ap_vf_reset_done : 1;
-		u32 dp_vf_reset_done : 1;
-		u32 qed_vf_reset_done : 1;
-		u32 dqed_vf_reset_done : 1;
-		u32 aqed_vf_reset_done : 1;
-		u32 rsvd0 : 6;
-		u32 vf_reset_active : 1;
-	} field;
-	u32 val;
-};
-
-#define DLB_CFG_MSTR_BCAST_RESET_VF_START 0xc8100000
-#define DLB_CFG_MSTR_BCAST_RESET_VF_START_RST 0x0
-/* HW Reset Types */
-#define VF_RST_TYPE_CQ_LDB   0
-#define VF_RST_TYPE_QID_LDB  1
-#define VF_RST_TYPE_POOL_LDB 2
-#define VF_RST_TYPE_CQ_DIR   8
-#define VF_RST_TYPE_QID_DIR  9
-#define VF_RST_TYPE_POOL_DIR 10
-union dlb_cfg_mstr_bcast_reset_vf_start {
-	struct {
-		u32 vf_reset_start : 1;
-		u32 reserved : 3;
-		u32 vf_reset_type : 4;
-		u32 vf_reset_id : 24;
-	} field;
-	u32 val;
-};
-
-#endif /* __DLB_REGS_H */
diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
deleted file mode 100644
index 4984de5d3..000000000
--- a/drivers/event/dlb/pf/base/dlb_resource.c
+++ /dev/null
@@ -1,6904 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include "dlb_hw_types.h"
-#include "../../dlb_user.h"
-#include "dlb_resource.h"
-#include "dlb_osdep.h"
-#include "dlb_osdep_bitmap.h"
-#include "dlb_osdep_types.h"
-#include "dlb_regs.h"
-#include "../../dlb_priv.h"
-#include "../../dlb_inline_fns.h"
-
-#define DLB_DOM_LIST_HEAD(head, type) \
-	DLB_LIST_HEAD((head), type, domain_list)
-
-#define DLB_FUNC_LIST_HEAD(head, type) \
-	DLB_LIST_HEAD((head), type, func_list)
-
-#define DLB_DOM_LIST_FOR(head, ptr, iter) \
-	DLB_LIST_FOR_EACH(head, ptr, domain_list, iter)
-
-#define DLB_FUNC_LIST_FOR(head, ptr, iter) \
-	DLB_LIST_FOR_EACH(head, ptr, func_list, iter)
-
-#define DLB_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
-	DLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
-
-#define DLB_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
-	DLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
-
-static inline void dlb_flush_csr(struct dlb_hw *hw)
-{
-	DLB_CSR_RD(hw, DLB_SYS_TOTAL_VAS);
-}
-
-static void dlb_init_fn_rsrc_lists(struct dlb_function_resources *rsrc)
-{
-	dlb_list_init_head(&rsrc->avail_domains);
-	dlb_list_init_head(&rsrc->used_domains);
-	dlb_list_init_head(&rsrc->avail_ldb_queues);
-	dlb_list_init_head(&rsrc->avail_ldb_ports);
-	dlb_list_init_head(&rsrc->avail_dir_pq_pairs);
-	dlb_list_init_head(&rsrc->avail_ldb_credit_pools);
-	dlb_list_init_head(&rsrc->avail_dir_credit_pools);
-}
-
-static void dlb_init_domain_rsrc_lists(struct dlb_domain *domain)
-{
-	dlb_list_init_head(&domain->used_ldb_queues);
-	dlb_list_init_head(&domain->used_ldb_ports);
-	dlb_list_init_head(&domain->used_dir_pq_pairs);
-	dlb_list_init_head(&domain->used_ldb_credit_pools);
-	dlb_list_init_head(&domain->used_dir_credit_pools);
-	dlb_list_init_head(&domain->avail_ldb_queues);
-	dlb_list_init_head(&domain->avail_ldb_ports);
-	dlb_list_init_head(&domain->avail_dir_pq_pairs);
-	dlb_list_init_head(&domain->avail_ldb_credit_pools);
-	dlb_list_init_head(&domain->avail_dir_credit_pools);
-}
-
-int dlb_resource_init(struct dlb_hw *hw)
-{
-	struct dlb_list_entry *list;
-	unsigned int i;
-
-	/* For optimal load-balancing, ports that map to one or more QIDs in
-	 * common should not be in numerical sequence. This is application
-	 * dependent, but the driver interleaves port IDs as much as possible
-	 * to reduce the likelihood of this. This initial allocation maximizes
-	 * the average distance between an ID and its immediate neighbors (i.e.
-	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
-	 * 3, etc.).
-	 */
-	u32 init_ldb_port_allocation[DLB_MAX_NUM_LDB_PORTS] = {
-		0,  31, 62, 29, 60, 27, 58, 25, 56, 23, 54, 21, 52, 19, 50, 17,
-		48, 15, 46, 13, 44, 11, 42,  9, 40,  7, 38,  5, 36,  3, 34, 1,
-		32, 63, 30, 61, 28, 59, 26, 57, 24, 55, 22, 53, 20, 51, 18, 49,
-		16, 47, 14, 45, 12, 43, 10, 41,  8, 39,  6, 37,  4, 35,  2, 33
-	};
-
-	/* Zero-out resource tracking data structures */
-	memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
-	memset(&hw->pf, 0, sizeof(hw->pf));
-
-	dlb_init_fn_rsrc_lists(&hw->pf);
-
-	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
-		memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
-		dlb_init_domain_rsrc_lists(&hw->domains[i]);
-		hw->domains[i].parent_func = &hw->pf;
-	}
-
-	/* Give all resources to the PF driver */
-	hw->pf.num_avail_domains = DLB_MAX_NUM_DOMAINS;
-	for (i = 0; i < hw->pf.num_avail_domains; i++) {
-		list = &hw->domains[i].func_list;
-
-		dlb_list_add(&hw->pf.avail_domains, list);
-	}
-
-	hw->pf.num_avail_ldb_queues = DLB_MAX_NUM_LDB_QUEUES;
-	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
-		list = &hw->rsrcs.ldb_queues[i].func_list;
-
-		dlb_list_add(&hw->pf.avail_ldb_queues, list);
-	}
-
-	hw->pf.num_avail_ldb_ports = DLB_MAX_NUM_LDB_PORTS;
-	for (i = 0; i < hw->pf.num_avail_ldb_ports; i++) {
-		struct dlb_ldb_port *port;
-
-		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
-
-		dlb_list_add(&hw->pf.avail_ldb_ports, &port->func_list);
-	}
-
-	hw->pf.num_avail_dir_pq_pairs = DLB_MAX_NUM_DIR_PORTS;
-	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
-		list = &hw->rsrcs.dir_pq_pairs[i].func_list;
-
-		dlb_list_add(&hw->pf.avail_dir_pq_pairs, list);
-	}
-
-	hw->pf.num_avail_ldb_credit_pools = DLB_MAX_NUM_LDB_CREDIT_POOLS;
-	for (i = 0; i < hw->pf.num_avail_ldb_credit_pools; i++) {
-		list = &hw->rsrcs.ldb_credit_pools[i].func_list;
-
-		dlb_list_add(&hw->pf.avail_ldb_credit_pools, list);
-	}
-
-	hw->pf.num_avail_dir_credit_pools = DLB_MAX_NUM_DIR_CREDIT_POOLS;
-	for (i = 0; i < hw->pf.num_avail_dir_credit_pools; i++) {
-		list = &hw->rsrcs.dir_credit_pools[i].func_list;
-
-		dlb_list_add(&hw->pf.avail_dir_credit_pools, list);
-	}
-
-	/* There are 5120 history list entries, which allows us to overprovision
-	 * the inflight limit (4096) by 1k.
-	 */
-	if (dlb_bitmap_alloc(hw,
-			     &hw->pf.avail_hist_list_entries,
-			     DLB_MAX_NUM_HIST_LIST_ENTRIES))
-		return -1;
-
-	if (dlb_bitmap_fill(hw->pf.avail_hist_list_entries))
-		return -1;
-
-	if (dlb_bitmap_alloc(hw,
-			     &hw->pf.avail_qed_freelist_entries,
-			     DLB_MAX_NUM_LDB_CREDITS))
-		return -1;
-
-	if (dlb_bitmap_fill(hw->pf.avail_qed_freelist_entries))
-		return -1;
-
-	if (dlb_bitmap_alloc(hw,
-			     &hw->pf.avail_dqed_freelist_entries,
-			     DLB_MAX_NUM_DIR_CREDITS))
-		return -1;
-
-	if (dlb_bitmap_fill(hw->pf.avail_dqed_freelist_entries))
-		return -1;
-
-	if (dlb_bitmap_alloc(hw,
-			     &hw->pf.avail_aqed_freelist_entries,
-			     DLB_MAX_NUM_AQOS_ENTRIES))
-		return -1;
-
-	if (dlb_bitmap_fill(hw->pf.avail_aqed_freelist_entries))
-		return -1;
-
-	/* Initialize the hardware resource IDs */
-	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++)
-		hw->domains[i].id = i;
-
-	for (i = 0; i < DLB_MAX_NUM_LDB_QUEUES; i++)
-		hw->rsrcs.ldb_queues[i].id = i;
-
-	for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
-		hw->rsrcs.ldb_ports[i].id = i;
-
-	for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
-		hw->rsrcs.dir_pq_pairs[i].id = i;
-
-	for (i = 0; i < DLB_MAX_NUM_LDB_CREDIT_POOLS; i++)
-		hw->rsrcs.ldb_credit_pools[i].id = i;
-
-	for (i = 0; i < DLB_MAX_NUM_DIR_CREDIT_POOLS; i++)
-		hw->rsrcs.dir_credit_pools[i].id = i;
-
-	for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
-		hw->rsrcs.sn_groups[i].id = i;
-		/* Default mode (0) is 32 sequence numbers per queue */
-		hw->rsrcs.sn_groups[i].mode = 0;
-		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 32;
-		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
-	}
-
-	return 0;
-}
-
-void dlb_resource_free(struct dlb_hw *hw)
-{
-	dlb_bitmap_free(hw->pf.avail_hist_list_entries);
-
-	dlb_bitmap_free(hw->pf.avail_qed_freelist_entries);
-
-	dlb_bitmap_free(hw->pf.avail_dqed_freelist_entries);
-
-	dlb_bitmap_free(hw->pf.avail_aqed_freelist_entries);
-}
-
-static struct dlb_domain *dlb_get_domain_from_id(struct dlb_hw *hw, u32 id)
-{
-	if (id >= DLB_MAX_NUM_DOMAINS)
-		return NULL;
-
-	return &hw->domains[id];
-}
-
-static int dlb_attach_ldb_queues(struct dlb_hw *hw,
-				 struct dlb_function_resources *rsrcs,
-				 struct dlb_domain *domain,
-				 u32 num_queues,
-				 struct dlb_cmd_response *resp)
-{
-	unsigned int i, j;
-
-	if (rsrcs->num_avail_ldb_queues < num_queues) {
-		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
-		return -1;
-	}
-
-	for (i = 0; i < num_queues; i++) {
-		struct dlb_ldb_queue *queue;
-
-		queue = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
-					   typeof(*queue));
-		if (queue == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: domain validation failed\n",
-				   __func__);
-			goto cleanup;
-		}
-
-		dlb_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
-
-		queue->domain_id = domain->id;
-		queue->owned = true;
-
-		dlb_list_add(&domain->avail_ldb_queues, &queue->domain_list);
-	}
-
-	rsrcs->num_avail_ldb_queues -= num_queues;
-
-	return 0;
-
-cleanup:
-
-	/* Return the assigned queues */
-	for (j = 0; j < i; j++) {
-		struct dlb_ldb_queue *queue;
-
-		queue = DLB_FUNC_LIST_HEAD(domain->avail_ldb_queues,
-					   typeof(*queue));
-		/* Unrecoverable internal error */
-		if (queue == NULL)
-			break;
-
-		queue->owned = false;
-
-		dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);
-
-		dlb_list_add(&rsrcs->avail_ldb_queues, &queue->func_list);
-	}
-
-	return -EFAULT;
-}
-
-static struct dlb_ldb_port *
-dlb_get_next_ldb_port(struct dlb_hw *hw,
-		      struct dlb_function_resources *rsrcs,
-		      u32 domain_id)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-
-	/* To reduce the odds of consecutive load-balanced ports mapping to the
-	 * same queue(s), the driver attempts to allocate ports whose neighbors
-	 * are owned by a different domain.
-	 */
-	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
-		u32 next, prev;
-		u32 phys_id;
-
-		phys_id = port->id;
-		next = phys_id + 1;
-		prev = phys_id - 1;
-
-		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
-			next = 0;
-		if (phys_id == 0)
-			prev = DLB_MAX_NUM_LDB_PORTS - 1;
-
-		if (!hw->rsrcs.ldb_ports[next].owned ||
-		    hw->rsrcs.ldb_ports[next].domain_id == domain_id)
-			continue;
-
-		if (!hw->rsrcs.ldb_ports[prev].owned ||
-		    hw->rsrcs.ldb_ports[prev].domain_id == domain_id)
-			continue;
-
-		return port;
-	}
-
-	/* Failing that, the driver looks for a port with one neighbor owned by
-	 * a different domain and the other unallocated.
-	 */
-	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
-		u32 next, prev;
-		u32 phys_id;
-
-		phys_id = port->id;
-		next = phys_id + 1;
-		prev = phys_id - 1;
-
-		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
-			next = 0;
-		if (phys_id == 0)
-			prev = DLB_MAX_NUM_LDB_PORTS - 1;
-
-		if (!hw->rsrcs.ldb_ports[prev].owned &&
-		    hw->rsrcs.ldb_ports[next].owned &&
-		    hw->rsrcs.ldb_ports[next].domain_id != domain_id)
-			return port;
-
-		if (!hw->rsrcs.ldb_ports[next].owned &&
-		    hw->rsrcs.ldb_ports[prev].owned &&
-		    hw->rsrcs.ldb_ports[prev].domain_id != domain_id)
-			return port;
-	}
-
-	/* Failing that, the driver looks for a port with both neighbors
-	 * unallocated.
-	 */
-	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
-		u32 next, prev;
-		u32 phys_id;
-
-		phys_id = port->id;
-		next = phys_id + 1;
-		prev = phys_id - 1;
-
-		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
-			next = 0;
-		if (phys_id == 0)
-			prev = DLB_MAX_NUM_LDB_PORTS - 1;
-
-		if (!hw->rsrcs.ldb_ports[prev].owned &&
-		    !hw->rsrcs.ldb_ports[next].owned)
-			return port;
-	}
-
-	/* If all else fails, the driver returns the next available port. */
-	return DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports, typeof(*port));
-}
-
-static int dlb_attach_ldb_ports(struct dlb_hw *hw,
-				struct dlb_function_resources *rsrcs,
-				struct dlb_domain *domain,
-				u32 num_ports,
-				struct dlb_cmd_response *resp)
-{
-	unsigned int i, j;
-
-	if (rsrcs->num_avail_ldb_ports < num_ports) {
-		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
-		return -1;
-	}
-
-	for (i = 0; i < num_ports; i++) {
-		struct dlb_ldb_port *port;
-
-		port = dlb_get_next_ldb_port(hw, rsrcs, domain->id);
-
-		if (port == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: domain validation failed\n",
-				   __func__);
-			goto cleanup;
-		}
-
-		dlb_list_del(&rsrcs->avail_ldb_ports, &port->func_list);
-
-		port->domain_id = domain->id;
-		port->owned = true;
-
-		dlb_list_add(&domain->avail_ldb_ports, &port->domain_list);
-	}
-
-	rsrcs->num_avail_ldb_ports -= num_ports;
-
-	return 0;
-
-cleanup:
-
-	/* Return the assigned ports */
-	for (j = 0; j < i; j++) {
-		struct dlb_ldb_port *port;
-
-		port = DLB_FUNC_LIST_HEAD(domain->avail_ldb_ports,
-					  typeof(*port));
-		/* Unrecoverable internal error */
-		if (port == NULL)
-			break;
-
-		port->owned = false;
-
-		dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);
-
-		dlb_list_add(&rsrcs->avail_ldb_ports, &port->func_list);
-	}
-
-	return -EFAULT;
-}
-
-static int dlb_attach_dir_ports(struct dlb_hw *hw,
-				struct dlb_function_resources *rsrcs,
-				struct dlb_domain *domain,
-				u32 num_ports,
-				struct dlb_cmd_response *resp)
-{
-	unsigned int i, j;
-
-	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
-		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
-		return -1;
-	}
-
-	for (i = 0; i < num_ports; i++) {
-		struct dlb_dir_pq_pair *port;
-
-		port = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
-					  typeof(*port));
-		if (port == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: domain validation failed\n",
-				   __func__);
-			goto cleanup;
-		}
-
-		dlb_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
-
-		port->domain_id = domain->id;
-		port->owned = true;
-
-		dlb_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
-	}
-
-	rsrcs->num_avail_dir_pq_pairs -= num_ports;
-
-	return 0;
-
-cleanup:
-
-	/* Return the assigned ports */
-	for (j = 0; j < i; j++) {
-		struct dlb_dir_pq_pair *port;
-
-		port = DLB_FUNC_LIST_HEAD(domain->avail_dir_pq_pairs,
-					  typeof(*port));
-		/* Unrecoverable internal error */
-		if (port == NULL)
-			break;
-
-		port->owned = false;
-
-		dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
-
-		dlb_list_add(&rsrcs->avail_dir_pq_pairs, &port->func_list);
-	}
-
-	return -EFAULT;
-}
-
-static int dlb_attach_ldb_credits(struct dlb_function_resources *rsrcs,
-				  struct dlb_domain *domain,
-				  u32 num_credits,
-				  struct dlb_cmd_response *resp)
-{
-	struct dlb_bitmap *bitmap = rsrcs->avail_qed_freelist_entries;
-
-	if (dlb_bitmap_count(bitmap) < (int)num_credits) {
-		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (num_credits) {
-		int base;
-
-		base = dlb_bitmap_find_set_bit_range(bitmap, num_credits);
-		if (base < 0)
-			goto error;
-
-		domain->qed_freelist.base = base;
-		domain->qed_freelist.bound = base + num_credits;
-		domain->qed_freelist.offset = 0;
-
-		dlb_bitmap_clear_range(bitmap, base, num_credits);
-	}
-
-	return 0;
-
-error:
-	resp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;
-	return -1;
-}
-
-static int dlb_attach_dir_credits(struct dlb_function_resources *rsrcs,
-				  struct dlb_domain *domain,
-				  u32 num_credits,
-				  struct dlb_cmd_response *resp)
-{
-	struct dlb_bitmap *bitmap = rsrcs->avail_dqed_freelist_entries;
-
-	if (dlb_bitmap_count(bitmap) < (int)num_credits) {
-		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (num_credits) {
-		int base;
-
-		base = dlb_bitmap_find_set_bit_range(bitmap, num_credits);
-		if (base < 0)
-			goto error;
-
-		domain->dqed_freelist.base = base;
-		domain->dqed_freelist.bound = base + num_credits;
-		domain->dqed_freelist.offset = 0;
-
-		dlb_bitmap_clear_range(bitmap, base, num_credits);
-	}
-
-	return 0;
-
-error:
-	resp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;
-	return -1;
-}
-
-static int dlb_attach_ldb_credit_pools(struct dlb_hw *hw,
-				       struct dlb_function_resources *rsrcs,
-				       struct dlb_domain *domain,
-				       u32 num_credit_pools,
-				       struct dlb_cmd_response *resp)
-{
-	unsigned int i, j;
-
-	if (rsrcs->num_avail_ldb_credit_pools < num_credit_pools) {
-		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
-		return -1;
-	}
-
-	for (i = 0; i < num_credit_pools; i++) {
-		struct dlb_credit_pool *pool;
-
-		pool = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_credit_pools,
-					  typeof(*pool));
-		if (pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: domain validation failed\n",
-				   __func__);
-			goto cleanup;
-		}
-
-		dlb_list_del(&rsrcs->avail_ldb_credit_pools,
-			     &pool->func_list);
-
-		pool->domain_id = domain->id;
-		pool->owned = true;
-
-		dlb_list_add(&domain->avail_ldb_credit_pools,
-			     &pool->domain_list);
-	}
-
-	rsrcs->num_avail_ldb_credit_pools -= num_credit_pools;
-
-	return 0;
-
-cleanup:
-
-	/* Return the assigned credit pools */
-	for (j = 0; j < i; j++) {
-		struct dlb_credit_pool *pool;
-
-		pool = DLB_FUNC_LIST_HEAD(domain->avail_ldb_credit_pools,
-					  typeof(*pool));
-		/* Unrecoverable internal error */
-		if (pool == NULL)
-			break;
-
-		pool->owned = false;
-
-		dlb_list_del(&domain->avail_ldb_credit_pools,
-			     &pool->domain_list);
-
-		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
-			     &pool->func_list);
-	}
-
-	return -EFAULT;
-}
-
-static int dlb_attach_dir_credit_pools(struct dlb_hw *hw,
-				       struct dlb_function_resources *rsrcs,
-				       struct dlb_domain *domain,
-				       u32 num_credit_pools,
-				       struct dlb_cmd_response *resp)
-{
-	unsigned int i, j;
-
-	if (rsrcs->num_avail_dir_credit_pools < num_credit_pools) {
-		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
-		return -1;
-	}
-
-	for (i = 0; i < num_credit_pools; i++) {
-		struct dlb_credit_pool *pool;
-
-		pool = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_credit_pools,
-					  typeof(*pool));
-		if (pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: domain validation failed\n",
-				   __func__);
-			goto cleanup;
-		}
-
-		dlb_list_del(&rsrcs->avail_dir_credit_pools,
-			     &pool->func_list);
-
-		pool->domain_id = domain->id;
-		pool->owned = true;
-
-		dlb_list_add(&domain->avail_dir_credit_pools,
-			     &pool->domain_list);
-	}
-
-	rsrcs->num_avail_dir_credit_pools -= num_credit_pools;
-
-	return 0;
-
-cleanup:
-
-	/* Return the assigned credit pools */
-	for (j = 0; j < i; j++) {
-		struct dlb_credit_pool *pool;
-
-		pool = DLB_FUNC_LIST_HEAD(domain->avail_dir_credit_pools,
-					  typeof(*pool));
-		/* Unrecoverable internal error */
-		if (pool == NULL)
-			break;
-
-		pool->owned = false;
-
-		dlb_list_del(&domain->avail_dir_credit_pools,
-			     &pool->domain_list);
-
-		dlb_list_add(&rsrcs->avail_dir_credit_pools,
-			     &pool->func_list);
-	}
-
-	return -EFAULT;
-}
-
-static int
-dlb_attach_domain_hist_list_entries(struct dlb_function_resources *rsrcs,
-				    struct dlb_domain *domain,
-				    u32 num_hist_list_entries,
-				    struct dlb_cmd_response *resp)
-{
-	struct dlb_bitmap *bitmap;
-	int base;
-
-	if (num_hist_list_entries) {
-		bitmap = rsrcs->avail_hist_list_entries;
-
-		base = dlb_bitmap_find_set_bit_range(bitmap,
-						     num_hist_list_entries);
-		if (base < 0)
-			goto error;
-
-		domain->total_hist_list_entries = num_hist_list_entries;
-		domain->avail_hist_list_entries = num_hist_list_entries;
-		domain->hist_list_entry_base = base;
-		domain->hist_list_entry_offset = 0;
-
-		dlb_bitmap_clear_range(bitmap, base, num_hist_list_entries);
-	}
-	return 0;
-
-error:
-	resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
-	return -1;
-}
-
-static int dlb_attach_atomic_inflights(struct dlb_function_resources *rsrcs,
-				       struct dlb_domain *domain,
-				       u32 num_atomic_inflights,
-				       struct dlb_cmd_response *resp)
-{
-	if (num_atomic_inflights) {
-		struct dlb_bitmap *bitmap =
-			rsrcs->avail_aqed_freelist_entries;
-		int base;
-
-		base = dlb_bitmap_find_set_bit_range(bitmap,
-						     num_atomic_inflights);
-		if (base < 0)
-			goto error;
-
-		domain->aqed_freelist.base = base;
-		domain->aqed_freelist.bound = base + num_atomic_inflights;
-		domain->aqed_freelist.offset = 0;
-
-		dlb_bitmap_clear_range(bitmap, base, num_atomic_inflights);
-	}
-
-	return 0;
-
-error:
-	resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
-	return -1;
-}
-
-
-static int
-dlb_domain_attach_resources(struct dlb_hw *hw,
-			    struct dlb_function_resources *rsrcs,
-			    struct dlb_domain *domain,
-			    struct dlb_create_sched_domain_args *args,
-			    struct dlb_cmd_response *resp)
-{
-	int ret;
-
-	ret = dlb_attach_ldb_queues(hw,
-				    rsrcs,
-				    domain,
-				    args->num_ldb_queues,
-				    resp);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_attach_ldb_ports(hw,
-				   rsrcs,
-				   domain,
-				   args->num_ldb_ports,
-				   resp);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_attach_dir_ports(hw,
-				   rsrcs,
-				   domain,
-				   args->num_dir_ports,
-				   resp);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_attach_ldb_credits(rsrcs,
-				     domain,
-				     args->num_ldb_credits,
-				     resp);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_attach_dir_credits(rsrcs,
-				     domain,
-				     args->num_dir_credits,
-				     resp);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_attach_ldb_credit_pools(hw,
-					  rsrcs,
-					  domain,
-					  args->num_ldb_credit_pools,
-					  resp);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_attach_dir_credit_pools(hw,
-					  rsrcs,
-					  domain,
-					  args->num_dir_credit_pools,
-					  resp);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_attach_domain_hist_list_entries(rsrcs,
-						  domain,
-						  args->num_hist_list_entries,
-						  resp);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_attach_atomic_inflights(rsrcs,
-					  domain,
-					  args->num_atomic_inflights,
-					  resp);
-	if (ret < 0)
-		return ret;
-
-	domain->configured = true;
-
-	domain->started = false;
-
-	rsrcs->num_avail_domains--;
-
-	return 0;
-}
-
-static void dlb_ldb_port_cq_enable(struct dlb_hw *hw,
-				   struct dlb_ldb_port *port)
-{
-	union dlb_lsp_cq_ldb_dsbl reg;
-
-	/* Don't re-enable the port if a removal is pending. The caller should
-	 * mark this port as enabled (if it isn't already), and when the
-	 * removal completes the port will be enabled.
-	 */
-	if (port->num_pending_removals)
-		return;
-
-	reg.field.disabled = 0;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);
-
-	dlb_flush_csr(hw);
-}
-
-static void dlb_dir_port_cq_enable(struct dlb_hw *hw,
-				   struct dlb_dir_pq_pair *port)
-{
-	union dlb_lsp_cq_dir_dsbl reg;
-
-	reg.field.disabled = 0;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);
-
-	dlb_flush_csr(hw);
-}
-
-
-static void dlb_ldb_port_cq_disable(struct dlb_hw *hw,
-				    struct dlb_ldb_port *port)
-{
-	union dlb_lsp_cq_ldb_dsbl reg;
-
-	reg.field.disabled = 1;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);
-
-	dlb_flush_csr(hw);
-}
-
-static void dlb_dir_port_cq_disable(struct dlb_hw *hw,
-				    struct dlb_dir_pq_pair *port)
-{
-	union dlb_lsp_cq_dir_dsbl reg;
-
-	reg.field.disabled = 1;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);
-
-	dlb_flush_csr(hw);
-}
-
-
-
-void dlb_disable_dp_vasr_feature(struct dlb_hw *hw)
-{
-	union dlb_dp_dir_csr_ctrl r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_DP_DIR_CSR_CTRL);
-
-	r0.field.cfg_vasr_dis = 1;
-
-	DLB_CSR_WR(hw, DLB_DP_DIR_CSR_CTRL, r0.val);
-}
-
-void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw)
-{
-	union dlb_chp_cfg_chp_csr_ctrl r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_CHP_CFG_CHP_CSR_CTRL);
-
-	r0.val |= 1 << DLB_CHP_CFG_EXCESS_TOKENS_SHIFT;
-
-	DLB_CSR_WR(hw, DLB_CHP_CFG_CHP_CSR_CTRL, r0.val);
-}
-
-void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw)
-{
-	union dlb_sys_cq_mode r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
-
-	r0.field.ldb_cq64 = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
-}
-
-void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw)
-{
-	union dlb_sys_cq_mode r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
-
-	r0.field.dir_cq64 = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
-}
-
-void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw)
-{
-	union dlb_sys_sys_alarm_int_enable r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
-
-	r0.field.pf_to_vf_isr_pend_error = 0;
-
-	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
-}
-
-static unsigned int
-dlb_get_num_ports_in_use(struct dlb_hw *hw)
-{
-	unsigned int i, n = 0;
-
-	for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
-		if (hw->rsrcs.ldb_ports[i].owned)
-			n++;
-
-	for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
-		if (hw->rsrcs.dir_pq_pairs[i].owned)
-			n++;
-
-	return n;
-}
-
-static bool dlb_port_find_slot(struct dlb_ldb_port *port,
-			       enum dlb_qid_map_state state,
-			       int *slot)
-{
-	int i;
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		if (port->qid_map[i].state == state)
-			break;
-	}
-
-	*slot = i;
-
-	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
-}
-
-static bool dlb_port_find_slot_queue(struct dlb_ldb_port *port,
-				     enum dlb_qid_map_state state,
-				     struct dlb_ldb_queue *queue,
-				     int *slot)
-{
-	int i;
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		if (port->qid_map[i].state == state &&
-		    port->qid_map[i].qid == queue->id)
-			break;
-	}
-
-	*slot = i;
-
-	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
-}
-
-static int dlb_port_slot_state_transition(struct dlb_hw *hw,
-					  struct dlb_ldb_port *port,
-					  struct dlb_ldb_queue *queue,
-					  int slot,
-					  enum dlb_qid_map_state new_state)
-{
-	enum dlb_qid_map_state curr_state = port->qid_map[slot].state;
-	struct dlb_domain *domain;
-
-	domain = dlb_get_domain_from_id(hw, port->domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: unable to find domain %d\n",
-			   __func__, port->domain_id);
-		return -EFAULT;
-	}
-
-	switch (curr_state) {
-	case DLB_QUEUE_UNMAPPED:
-		switch (new_state) {
-		case DLB_QUEUE_MAPPED:
-			queue->num_mappings++;
-			port->num_mappings++;
-			break;
-		case DLB_QUEUE_MAP_IN_PROGRESS:
-			queue->num_pending_additions++;
-			domain->num_pending_additions++;
-			break;
-		default:
-			goto error;
-		}
-		break;
-	case DLB_QUEUE_MAPPED:
-		switch (new_state) {
-		case DLB_QUEUE_UNMAPPED:
-			queue->num_mappings--;
-			port->num_mappings--;
-			break;
-		case DLB_QUEUE_UNMAP_IN_PROGRESS:
-			port->num_pending_removals++;
-			domain->num_pending_removals++;
-			break;
-		case DLB_QUEUE_MAPPED:
-			/* Priority change, nothing to update */
-			break;
-		default:
-			goto error;
-		}
-		break;
-	case DLB_QUEUE_MAP_IN_PROGRESS:
-		switch (new_state) {
-		case DLB_QUEUE_UNMAPPED:
-			queue->num_pending_additions--;
-			domain->num_pending_additions--;
-			break;
-		case DLB_QUEUE_MAPPED:
-			queue->num_mappings++;
-			port->num_mappings++;
-			queue->num_pending_additions--;
-			domain->num_pending_additions--;
-			break;
-		default:
-			goto error;
-		}
-		break;
-	case DLB_QUEUE_UNMAP_IN_PROGRESS:
-		switch (new_state) {
-		case DLB_QUEUE_UNMAPPED:
-			port->num_pending_removals--;
-			domain->num_pending_removals--;
-			queue->num_mappings--;
-			port->num_mappings--;
-			break;
-		case DLB_QUEUE_MAPPED:
-			port->num_pending_removals--;
-			domain->num_pending_removals--;
-			break;
-		case DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:
-			/* Nothing to update */
-			break;
-		default:
-			goto error;
-		}
-		break;
-	case DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:
-		switch (new_state) {
-		case DLB_QUEUE_UNMAP_IN_PROGRESS:
-			/* Nothing to update */
-			break;
-		case DLB_QUEUE_UNMAPPED:
-			/* An UNMAP_IN_PROGRESS_PENDING_MAP slot briefly
-			 * becomes UNMAPPED before it transitions to
-			 * MAP_IN_PROGRESS.
-			 */
-			queue->num_mappings--;
-			port->num_mappings--;
-			port->num_pending_removals--;
-			domain->num_pending_removals--;
-			break;
-		default:
-			goto error;
-		}
-		break;
-	default:
-		goto error;
-	}
-
-	port->qid_map[slot].state = new_state;
-
-	DLB_HW_INFO(hw,
-		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
-		    __func__, queue->id, port->id, curr_state,
-		    new_state);
-	return 0;
-
-error:
-	DLB_HW_ERR(hw,
-		   "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
-		   __func__, queue->id, port->id, curr_state,
-		   new_state);
-	return -EFAULT;
-}
-
-/* dlb_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as their
- * function names imply, and should only be called by the dynamic CQ mapping
- * code.
- */
-static void dlb_ldb_queue_disable_mapped_cqs(struct dlb_hw *hw,
-					     struct dlb_domain *domain,
-					     struct dlb_ldb_queue *queue)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-	int slot;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;
-
-		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
-			continue;
-
-		if (port->enabled)
-			dlb_ldb_port_cq_disable(hw, port);
-	}
-}
-
-static void dlb_ldb_queue_enable_mapped_cqs(struct dlb_hw *hw,
-					    struct dlb_domain *domain,
-					    struct dlb_ldb_queue *queue)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-	int slot;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;
-
-		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
-			continue;
-
-		if (port->enabled)
-			dlb_ldb_port_cq_enable(hw, port);
-	}
-}
-
-static int dlb_ldb_port_map_qid_static(struct dlb_hw *hw,
-				       struct dlb_ldb_port *p,
-				       struct dlb_ldb_queue *q,
-				       u8 priority)
-{
-	union dlb_lsp_cq2priov r0;
-	union dlb_lsp_cq2qid r1;
-	union dlb_atm_pipe_qid_ldb_qid2cqidx r2;
-	union dlb_lsp_qid_ldb_qid2cqidx r3;
-	union dlb_lsp_qid_ldb_qid2cqidx2 r4;
-	enum dlb_qid_map_state state;
-	int i;
-
-	/* Look for a pending or already mapped slot, else an unused slot */
-	if (!dlb_port_find_slot_queue(p, DLB_QUEUE_MAP_IN_PROGRESS, q, &i) &&
-	    !dlb_port_find_slot_queue(p, DLB_QUEUE_MAPPED, q, &i) &&
-	    !dlb_port_find_slot(p, DLB_QUEUE_UNMAPPED, &i)) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: port slot tracking failed\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	/* Read-modify-write the priority and valid bit register */
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(p->id));
-
-	r0.field.v |= 1 << i;
-	r0.field.prio |= (priority & 0x7) << i * 3;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(p->id), r0.val);
-
-	/* Read-modify-write the QID map register */
-	r1.val = DLB_CSR_RD(hw, DLB_LSP_CQ2QID(p->id, i / 4));
-
-	if (i == 0 || i == 4)
-		r1.field.qid_p0 = q->id;
-	if (i == 1 || i == 5)
-		r1.field.qid_p1 = q->id;
-	if (i == 2 || i == 6)
-		r1.field.qid_p2 = q->id;
-	if (i == 3 || i == 7)
-		r1.field.qid_p3 = q->id;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ2QID(p->id, i / 4), r1.val);
-
-	r2.val = DLB_CSR_RD(hw,
-			    DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,
-							   p->id / 4));
-
-	r3.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_LDB_QID2CQIDX(q->id,
-						      p->id / 4));
-
-	r4.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_LDB_QID2CQIDX2(q->id,
-						       p->id / 4));
-
-	switch (p->id % 4) {
-	case 0:
-		r2.field.cq_p0 |= 1 << i;
-		r3.field.cq_p0 |= 1 << i;
-		r4.field.cq_p0 |= 1 << i;
-		break;
-
-	case 1:
-		r2.field.cq_p1 |= 1 << i;
-		r3.field.cq_p1 |= 1 << i;
-		r4.field.cq_p1 |= 1 << i;
-		break;
-
-	case 2:
-		r2.field.cq_p2 |= 1 << i;
-		r3.field.cq_p2 |= 1 << i;
-		r4.field.cq_p2 |= 1 << i;
-		break;
-
-	case 3:
-		r2.field.cq_p3 |= 1 << i;
-		r3.field.cq_p3 |= 1 << i;
-		r4.field.cq_p3 |= 1 << i;
-		break;
-	}
-
-	DLB_CSR_WR(hw,
-		   DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,
-						  p->id / 4),
-		   r2.val);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_QID_LDB_QID2CQIDX(q->id,
-					     p->id / 4),
-		   r3.val);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_QID_LDB_QID2CQIDX2(q->id,
-					      p->id / 4),
-		   r4.val);
-
-	dlb_flush_csr(hw);
-
-	p->qid_map[i].qid = q->id;
-	p->qid_map[i].priority = priority;
-
-	state = DLB_QUEUE_MAPPED;
-
-	return dlb_port_slot_state_transition(hw, p, q, i, state);
-}
-
-static int dlb_ldb_port_set_has_work_bits(struct dlb_hw *hw,
-					  struct dlb_ldb_port *port,
-					  struct dlb_ldb_queue *queue,
-					  int slot)
-{
-	union dlb_lsp_qid_aqed_active_cnt r0;
-	union dlb_lsp_qid_ldb_enqueue_cnt r1;
-	union dlb_lsp_ldb_sched_ctrl r2 = { {0} };
-
-	/* Set the atomic scheduling haswork bit */
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
-
-	r2.field.cq = port->id;
-	r2.field.qidix = slot;
-	r2.field.value = 1;
-	r2.field.rlist_haswork_v = r0.field.count > 0;
-
-	/* Set the non-atomic scheduling haswork bit */
-	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
-
-	r1.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
-
-	memset(&r2, 0, sizeof(r2));
-
-	r2.field.cq = port->id;
-	r2.field.qidix = slot;
-	r2.field.value = 1;
-	r2.field.nalb_haswork_v = (r1.field.count > 0);
-
-	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
-
-	dlb_flush_csr(hw);
-
-	return 0;
-}
-
-static void dlb_ldb_port_clear_queue_if_status(struct dlb_hw *hw,
-					       struct dlb_ldb_port *port,
-					       int slot)
-{
-	union dlb_lsp_ldb_sched_ctrl r0 = { {0} };
-
-	r0.field.cq = port->id;
-	r0.field.qidix = slot;
-	r0.field.value = 0;
-	r0.field.inflight_ok_v = 1;
-
-	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);
-
-	dlb_flush_csr(hw);
-}
-
-static void dlb_ldb_port_set_queue_if_status(struct dlb_hw *hw,
-					     struct dlb_ldb_port *port,
-					     int slot)
-{
-	union dlb_lsp_ldb_sched_ctrl r0 = { {0} };
-
-	r0.field.cq = port->id;
-	r0.field.qidix = slot;
-	r0.field.value = 1;
-	r0.field.inflight_ok_v = 1;
-
-	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);
-
-	dlb_flush_csr(hw);
-}
-
-static void dlb_ldb_queue_set_inflight_limit(struct dlb_hw *hw,
-					     struct dlb_ldb_queue *queue)
-{
-	union dlb_lsp_qid_ldb_infl_lim r0 = { {0} };
-
-	r0.field.limit = queue->num_qid_inflights;
-
-	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r0.val);
-}
-
-static void dlb_ldb_queue_clear_inflight_limit(struct dlb_hw *hw,
-					       struct dlb_ldb_queue *queue)
-{
-	DLB_CSR_WR(hw,
-		   DLB_LSP_QID_LDB_INFL_LIM(queue->id),
-		   DLB_LSP_QID_LDB_INFL_LIM_RST);
-}
-
-static int dlb_ldb_port_finish_map_qid_dynamic(struct dlb_hw *hw,
-					       struct dlb_domain *domain,
-					       struct dlb_ldb_port *port,
-					       struct dlb_ldb_queue *queue)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_lsp_qid_ldb_infl_cnt r0;
-	enum dlb_qid_map_state state;
-	int slot, ret;
-	u8 prio;
-
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
-
-	if (r0.field.count) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: non-zero QID inflight count\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	/* For each port with a pending mapping to this queue, perform the
-	 * static mapping and set the corresponding has_work bits.
-	 */
-	state = DLB_QUEUE_MAP_IN_PROGRESS;
-	if (!dlb_port_find_slot_queue(port, state, queue, &slot))
-		return -EINVAL;
-
-	if (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: port slot tracking failed\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	prio = port->qid_map[slot].priority;
-
-	/* Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
-	 * the port's qid_map state.
-	 */
-	ret = dlb_ldb_port_map_qid_static(hw, port, queue, prio);
-	if (ret)
-		return ret;
-
-	ret = dlb_ldb_port_set_has_work_bits(hw, port, queue, slot);
-	if (ret)
-		return ret;
-
-	/* Ensure IF_status(cq, qid) is 0 before enabling the port, to
-	 * prevent spurious schedules from causing the queue's inflight
-	 * count to increase.
-	 */
-	dlb_ldb_port_clear_queue_if_status(hw, port, slot);
-
-	/* Reset the queue's inflight status */
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		state = DLB_QUEUE_MAPPED;
-		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
-			continue;
-
-		dlb_ldb_port_set_queue_if_status(hw, port, slot);
-	}
-
-	dlb_ldb_queue_set_inflight_limit(hw, queue);
-
-	/* Re-enable CQs mapped to this queue */
-	dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
-
-	/* If this queue has other mappings pending, clear its inflight limit */
-	if (queue->num_pending_additions > 0)
-		dlb_ldb_queue_clear_inflight_limit(hw, queue);
-
-	return 0;
-}
-
-/**
- * dlb_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
- * @hw: dlb_hw handle for a particular device.
- * @port: load-balanced port
- * @queue: load-balanced queue
- * @priority: queue servicing priority
- *
- * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
- * at a later point, and <0 if an error occurred.
- */
-static int dlb_ldb_port_map_qid_dynamic(struct dlb_hw *hw,
-					struct dlb_ldb_port *port,
-					struct dlb_ldb_queue *queue,
-					u8 priority)
-{
-	union dlb_lsp_qid_ldb_infl_cnt r0 = { {0} };
-	enum dlb_qid_map_state state;
-	struct dlb_domain *domain;
-	int slot, ret;
-
-	domain = dlb_get_domain_from_id(hw, port->domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: unable to find domain %d\n",
-			   __func__, port->domain_id);
-		return -EFAULT;
-	}
-
-	/* Set the QID inflight limit to 0 to prevent further scheduling of the
-	 * queue.
-	 */
-	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), 0);
-
-	if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &slot)) {
-		DLB_HW_ERR(hw,
-			   "Internal error: No available unmapped slots\n");
-		return -EFAULT;
-	}
-
-	if (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: port slot tracking failed\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	port->qid_map[slot].qid = queue->id;
-	port->qid_map[slot].priority = priority;
-
-	state = DLB_QUEUE_MAP_IN_PROGRESS;
-	ret = dlb_port_slot_state_transition(hw, port, queue, slot, state);
-	if (ret)
-		return ret;
-
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
-
-	if (r0.field.count) {
-		/* The queue is owed completions so it's not safe to map it
-		 * yet. Schedule a kernel thread to complete the mapping later,
-		 * once software has completed all the queue's inflight events.
-		 */
-		if (!os_worker_active(hw))
-			os_schedule_work(hw);
-
-		return 1;
-	}
-
-	/* Disable the affected CQ, and the CQs already mapped to the QID,
-	 * before reading the QID's inflight count a second time. There is an
-	 * unlikely race in which the QID may schedule one more QE after we
-	 * read an inflight count of 0, and disabling the CQs guarantees that
-	 * the race will not occur after a re-read of the inflight count
-	 * register.
-	 */
-	if (port->enabled)
-		dlb_ldb_port_cq_disable(hw, port);
-
-	dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
-
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
-
-	if (r0.field.count) {
-		if (port->enabled)
-			dlb_ldb_port_cq_enable(hw, port);
-
-		dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
-
-		/* The queue is owed completions so it's not safe to map it
-		 * yet. Schedule a kernel thread to complete the mapping later,
-		 * once software has completed all the queue's inflight events.
-		 */
-		if (!os_worker_active(hw))
-			os_schedule_work(hw);
-
-		return 1;
-	}
-
-	return dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
-}
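
The dynamic-map path above relies on a check/quiesce/re-check sequence: a single zero reading of the inflight count is not trustworthy, because the queue may schedule one last QE immediately afterwards, so the CQs are disabled and the count is re-read before the mapping is committed. The sketch below shows that pattern in isolation; read_inflight(), disable_consumers(), enable_consumers() and defer_to_worker() are hypothetical stand-ins, not driver functions.

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_inflight;	/* stub for the QID inflight counter */

static unsigned int read_inflight(void) { return fake_inflight; }
static void disable_consumers(void)     { /* stub: quiesce the CQs */ }
static void enable_consumers(void)      { /* stub: re-enable the CQs */ }
static void defer_to_worker(void)       { /* stub: schedule deferred work */ }

/* Returns true if the map can be committed now, false if it was deferred. */
static bool try_commit_map(void)
{
	if (read_inflight() > 0) {
		defer_to_worker();		/* first check: queue still busy */
		return false;
	}

	disable_consumers();			/* quiesce to close the race window */

	if (read_inflight() > 0) {		/* re-check under quiescence */
		enable_consumers();
		defer_to_worker();
		return false;
	}

	return true;				/* no more QEs can be scheduled */
}

int main(void)
{
	printf("committed now: %d\n", try_commit_map());
	return 0;
}
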
-
-static int dlb_ldb_port_map_qid(struct dlb_hw *hw,
-				struct dlb_domain *domain,
-				struct dlb_ldb_port *port,
-				struct dlb_ldb_queue *queue,
-				u8 prio)
-{
-	if (domain->started)
-		return dlb_ldb_port_map_qid_dynamic(hw, port, queue, prio);
-	else
-		return dlb_ldb_port_map_qid_static(hw, port, queue, prio);
-}
-
-static int dlb_ldb_port_unmap_qid(struct dlb_hw *hw,
-				  struct dlb_ldb_port *port,
-				  struct dlb_ldb_queue *queue)
-{
-	enum dlb_qid_map_state mapped, in_progress, pending_map, unmapped;
-	union dlb_lsp_cq2priov r0;
-	union dlb_atm_pipe_qid_ldb_qid2cqidx r1;
-	union dlb_lsp_qid_ldb_qid2cqidx r2;
-	union dlb_lsp_qid_ldb_qid2cqidx2 r3;
-	u32 queue_id;
-	u32 port_id;
-	int i;
-
-	/* Find the queue's slot */
-	mapped = DLB_QUEUE_MAPPED;
-	in_progress = DLB_QUEUE_UNMAP_IN_PROGRESS;
-	pending_map = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;
-
-	if (!dlb_port_find_slot_queue(port, mapped, queue, &i) &&
-	    !dlb_port_find_slot_queue(port, in_progress, queue, &i) &&
-	    !dlb_port_find_slot_queue(port, pending_map, queue, &i)) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: QID %d isn't mapped\n",
-			   __func__, __LINE__, queue->id);
-		return -EFAULT;
-	}
-
-	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: port slot tracking failed\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	port_id = port->id;
-	queue_id = queue->id;
-
-	/* Read-modify-write the priority and valid bit register */
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port_id));
-
-	r0.field.v &= ~(1 << i);
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port_id), r0.val);
-
-	r1.val = DLB_CSR_RD(hw,
-			    DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id,
-							   port_id / 4));
-
-	r2.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_LDB_QID2CQIDX(queue_id,
-						      port_id / 4));
-
-	r3.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_LDB_QID2CQIDX2(queue_id,
-						       port_id / 4));
-
-	switch (port_id % 4) {
-	case 0:
-		r1.field.cq_p0 &= ~(1 << i);
-		r2.field.cq_p0 &= ~(1 << i);
-		r3.field.cq_p0 &= ~(1 << i);
-		break;
-
-	case 1:
-		r1.field.cq_p1 &= ~(1 << i);
-		r2.field.cq_p1 &= ~(1 << i);
-		r3.field.cq_p1 &= ~(1 << i);
-		break;
-
-	case 2:
-		r1.field.cq_p2 &= ~(1 << i);
-		r2.field.cq_p2 &= ~(1 << i);
-		r3.field.cq_p2 &= ~(1 << i);
-		break;
-
-	case 3:
-		r1.field.cq_p3 &= ~(1 << i);
-		r2.field.cq_p3 &= ~(1 << i);
-		r3.field.cq_p3 &= ~(1 << i);
-		break;
-	}
-
-	DLB_CSR_WR(hw,
-		   DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id, port_id / 4),
-		   r1.val);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_QID_LDB_QID2CQIDX(queue_id, port_id / 4),
-		   r2.val);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_QID_LDB_QID2CQIDX2(queue_id, port_id / 4),
-		   r3.val);
-
-	dlb_flush_csr(hw);
-
-	unmapped = DLB_QUEUE_UNMAPPED;
-
-	return dlb_port_slot_state_transition(hw, port, queue, i, unmapped);
-}
-
-static int
-dlb_verify_create_sched_domain_args(struct dlb_hw *hw,
-				    struct dlb_function_resources *rsrcs,
-				    struct dlb_create_sched_domain_args *args,
-				    struct dlb_cmd_response *resp)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_bitmap *ldb_credit_freelist;
-	struct dlb_bitmap *dir_credit_freelist;
-	unsigned int ldb_credit_freelist_count;
-	unsigned int dir_credit_freelist_count;
-	unsigned int max_contig_aqed_entries;
-	unsigned int max_contig_dqed_entries;
-	unsigned int max_contig_qed_entries;
-	unsigned int max_contig_hl_entries;
-	struct dlb_bitmap *aqed_freelist;
-	enum dlb_dev_revision revision;
-
-	ldb_credit_freelist = rsrcs->avail_qed_freelist_entries;
-	dir_credit_freelist = rsrcs->avail_dqed_freelist_entries;
-	aqed_freelist = rsrcs->avail_aqed_freelist_entries;
-
-	ldb_credit_freelist_count = dlb_bitmap_count(ldb_credit_freelist);
-	dir_credit_freelist_count = dlb_bitmap_count(dir_credit_freelist);
-
-	max_contig_hl_entries =
-		dlb_bitmap_longest_set_range(rsrcs->avail_hist_list_entries);
-	max_contig_aqed_entries =
-		dlb_bitmap_longest_set_range(aqed_freelist);
-	max_contig_qed_entries =
-		dlb_bitmap_longest_set_range(ldb_credit_freelist);
-	max_contig_dqed_entries =
-		dlb_bitmap_longest_set_range(dir_credit_freelist);
-
-	if (rsrcs->num_avail_domains < 1)
-		resp->status = DLB_ST_DOMAIN_UNAVAILABLE;
-	else if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues)
-		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
-	else if (rsrcs->num_avail_ldb_ports < args->num_ldb_ports)
-		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
-	else if (args->num_ldb_queues > 0 && args->num_ldb_ports == 0)
-		resp->status = DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
-	else if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports)
-		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
-	else if (ldb_credit_freelist_count < args->num_ldb_credits)
-		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
-	else if (dir_credit_freelist_count < args->num_dir_credits)
-		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
-	else if (rsrcs->num_avail_ldb_credit_pools < args->num_ldb_credit_pools)
-		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
-	else if (rsrcs->num_avail_dir_credit_pools < args->num_dir_credit_pools)
-		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
-	else if (max_contig_hl_entries < args->num_hist_list_entries)
-		resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
-	else if (max_contig_aqed_entries < args->num_atomic_inflights)
-		resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
-	else if (max_contig_qed_entries < args->num_ldb_credits)
-		resp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;
-	else if (max_contig_dqed_entries < args->num_dir_credits)
-		resp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;
-
-	/* DLB A-stepping workaround for a hardware write-buffer lock-up issue:
-	 * limit the maximum number of configured ports to less than 128 and
-	 * disable CQ occupancy interrupts.
-	 */
-	revision = os_get_dev_revision(hw);
-
-	if (revision < DLB_B0) {
-		u32 n = dlb_get_num_ports_in_use(hw);
-
-		n += args->num_ldb_ports + args->num_dir_ports;
-
-		if (n >= DLB_A_STEP_MAX_PORTS)
-			resp->status = args->num_ldb_ports ?
-				DLB_ST_LDB_PORTS_UNAVAILABLE :
-				DLB_ST_DIR_PORTS_UNAVAILABLE;
-	}
-
-	if (resp->status)
-		return -1;
-
-	return 0;
-}
-
-static void
-dlb_log_create_sched_domain_args(struct dlb_hw *hw,
-				 struct dlb_create_sched_domain_args *args)
-{
-	DLB_HW_INFO(hw, "DLB create sched domain arguments:\n");
-	DLB_HW_INFO(hw, "\tNumber of LDB queues:        %d\n",
-		    args->num_ldb_queues);
-	DLB_HW_INFO(hw, "\tNumber of LDB ports:         %d\n",
-		    args->num_ldb_ports);
-	DLB_HW_INFO(hw, "\tNumber of DIR ports:         %d\n",
-		    args->num_dir_ports);
-	DLB_HW_INFO(hw, "\tNumber of ATM inflights:     %d\n",
-		    args->num_atomic_inflights);
-	DLB_HW_INFO(hw, "\tNumber of hist list entries: %d\n",
-		    args->num_hist_list_entries);
-	DLB_HW_INFO(hw, "\tNumber of LDB credits:       %d\n",
-		    args->num_ldb_credits);
-	DLB_HW_INFO(hw, "\tNumber of DIR credits:       %d\n",
-		    args->num_dir_credits);
-	DLB_HW_INFO(hw, "\tNumber of LDB credit pools:  %d\n",
-		    args->num_ldb_credit_pools);
-	DLB_HW_INFO(hw, "\tNumber of DIR credit pools:  %d\n",
-		    args->num_dir_credit_pools);
-}
-
-/**
- * dlb_hw_create_sched_domain() - Allocate and initialize a DLB scheduling
- *	domain and its resources.
- * @hw:	  Contains the current state of the DLB hardware.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_create_sched_domain(struct dlb_hw *hw,
-			       struct dlb_create_sched_domain_args *args,
-			       struct dlb_cmd_response *resp)
-{
-	struct dlb_domain *domain;
-	struct dlb_function_resources *rsrcs;
-	int ret;
-
-	rsrcs = &hw->pf;
-
-	dlb_log_create_sched_domain_args(hw, args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	if (dlb_verify_create_sched_domain_args(hw, rsrcs, args, resp))
-		return -EINVAL;
-
-	domain = DLB_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
-
-	/* Verification should catch this. */
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available domains\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (domain->configured) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: avail_domains contains configured domains.\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	dlb_init_domain_rsrc_lists(domain);
-
-	/* Verification should catch this too. */
-	ret = dlb_domain_attach_resources(hw, rsrcs, domain, args, resp);
-	if (ret < 0) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: failed to verify args.\n",
-			   __func__);
-
-		return -EFAULT;
-	}
-
-	dlb_list_del(&rsrcs->avail_domains, &domain->func_list);
-
-	dlb_list_add(&rsrcs->used_domains, &domain->func_list);
-
-	resp->id = domain->id;
-	resp->status = 0;
-
-	return 0;
-}
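
dlb_hw_create_sched_domain() and the pool-creation routines that follow share a verify-then-commit shape: validate the request against the free resources first, peek at the head of the 'avail' list, configure it, and only then move it to the 'used' list, so a rejected or failed request never needs unwinding. The stand-alone sketch below illustrates that flow under simplified assumptions; the list type and helpers are illustrative, not the dlb_list_* API.

#include <stddef.h>
#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

struct list {
	struct node *head;
};

static void list_del_head(struct list *l)
{
	if (l->head)
		l->head = l->head->next;
}

static void list_add_head(struct list *l, struct node *n)
{
	n->next = l->head;
	l->head = n;
}

static int create_resource(struct list *avail, struct list *used)
{
	struct node *n = avail->head;	/* peek; do not detach yet */

	if (n == NULL)
		return -1;		/* verify before touching any state */

	/* ...configure the resource here; on failure simply return... */

	list_del_head(avail);		/* commit: move 'avail' -> 'used' */
	list_add_head(used, n);

	return n->id;
}

int main(void)
{
	struct node dom = { .id = 0, .next = NULL };
	struct list avail = { .head = &dom };
	struct list used = { .head = NULL };

	printf("created domain %d\n", create_resource(&avail, &used));
	return 0;
}
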
-
-static void
-dlb_configure_ldb_credit_pool(struct dlb_hw *hw,
-			      struct dlb_domain *domain,
-			      struct dlb_create_ldb_pool_args *args,
-			      struct dlb_credit_pool *pool)
-{
-	union dlb_sys_ldb_pool_enbld r0 = { {0} };
-	union dlb_chp_ldb_pool_crd_lim r1 = { {0} };
-	union dlb_chp_ldb_pool_crd_cnt r2 = { {0} };
-	union dlb_chp_qed_fl_base  r3 = { {0} };
-	union dlb_chp_qed_fl_lim r4 = { {0} };
-	union dlb_chp_qed_fl_push_ptr r5 = { {0} };
-	union dlb_chp_qed_fl_pop_ptr  r6 = { {0} };
-
-	r1.field.limit = args->num_ldb_credits;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_LIM(pool->id), r1.val);
-
-	r2.field.count = args->num_ldb_credits;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_CNT(pool->id), r2.val);
-
-	r3.field.base = domain->qed_freelist.base + domain->qed_freelist.offset;
-
-	DLB_CSR_WR(hw, DLB_CHP_QED_FL_BASE(pool->id), r3.val);
-
-	r4.field.freelist_disable = 0;
-	r4.field.limit = r3.field.base + args->num_ldb_credits - 1;
-
-	DLB_CSR_WR(hw, DLB_CHP_QED_FL_LIM(pool->id), r4.val);
-
-	r5.field.push_ptr = r3.field.base;
-	r5.field.generation = 1;
-
-	DLB_CSR_WR(hw, DLB_CHP_QED_FL_PUSH_PTR(pool->id), r5.val);
-
-	r6.field.pop_ptr = r3.field.base;
-	r6.field.generation = 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_QED_FL_POP_PTR(pool->id), r6.val);
-
-	r0.field.pool_enabled = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_POOL_ENBLD(pool->id), r0.val);
-
-	pool->avail_credits = args->num_ldb_credits;
-	pool->total_credits = args->num_ldb_credits;
-	domain->qed_freelist.offset += args->num_ldb_credits;
-
-	pool->configured = true;
-}
-
-static int
-dlb_verify_create_ldb_pool_args(struct dlb_hw *hw,
-				u32 domain_id,
-				struct dlb_create_ldb_pool_args *args,
-				struct dlb_cmd_response *resp)
-{
-	struct dlb_freelist *qed_freelist;
-	struct dlb_domain *domain;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	qed_freelist = &domain->qed_freelist;
-
-	if (dlb_freelist_count(qed_freelist) < args->num_ldb_credits) {
-		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (dlb_list_empty(&domain->avail_ldb_credit_pools)) {
-		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (domain->started) {
-		resp->status = DLB_ST_DOMAIN_STARTED;
-		return -1;
-	}
-
-	return 0;
-}
-
-static void
-dlb_log_create_ldb_pool_args(struct dlb_hw *hw,
-			     u32 domain_id,
-			     struct dlb_create_ldb_pool_args *args)
-{
-	DLB_HW_INFO(hw, "DLB create load-balanced credit pool arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID:             %d\n", domain_id);
-	DLB_HW_INFO(hw, "\tNumber of LDB credits: %d\n",
-		    args->num_ldb_credits);
-}
-
-/**
- * dlb_hw_create_ldb_pool() - Allocate and initialize a DLB load-balanced
- *	credit pool.
- * @hw:	  Contains the current state of the DLB hardware.
- * @domain_id: Domain ID.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_ldb_pool_args *args,
-			   struct dlb_cmd_response *resp)
-{
-	struct dlb_credit_pool *pool;
-	struct dlb_domain *domain;
-
-	dlb_log_create_ldb_pool_args(hw, domain_id, args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	if (dlb_verify_create_ldb_pool_args(hw, domain_id, args, resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	pool = DLB_DOM_LIST_HEAD(domain->avail_ldb_credit_pools, typeof(*pool));
-
-	/* Verification should catch this. */
-	if (pool == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available ldb credit pools\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	dlb_configure_ldb_credit_pool(hw, domain, args, pool);
-
-	/* Configuration succeeded, so move the resource from the 'avail' to
-	 * the 'used' list.
-	 */
-	dlb_list_del(&domain->avail_ldb_credit_pools, &pool->domain_list);
-
-	dlb_list_add(&domain->used_ldb_credit_pools, &pool->domain_list);
-
-	resp->status = 0;
-	resp->id = pool->id;
-
-	return 0;
-}
-
-static void
-dlb_configure_dir_credit_pool(struct dlb_hw *hw,
-			      struct dlb_domain *domain,
-			      struct dlb_create_dir_pool_args *args,
-			      struct dlb_credit_pool *pool)
-{
-	union dlb_sys_dir_pool_enbld r0 = { {0} };
-	union dlb_chp_dir_pool_crd_lim r1 = { {0} };
-	union dlb_chp_dir_pool_crd_cnt r2 = { {0} };
-	union dlb_chp_dqed_fl_base  r3 = { {0} };
-	union dlb_chp_dqed_fl_lim r4 = { {0} };
-	union dlb_chp_dqed_fl_push_ptr r5 = { {0} };
-	union dlb_chp_dqed_fl_pop_ptr  r6 = { {0} };
-
-	r1.field.limit = args->num_dir_credits;
-
-	DLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_LIM(pool->id), r1.val);
-
-	r2.field.count = args->num_dir_credits;
-
-	DLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_CNT(pool->id), r2.val);
-
-	r3.field.base = domain->dqed_freelist.base +
-			domain->dqed_freelist.offset;
-
-	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_BASE(pool->id), r3.val);
-
-	r4.field.freelist_disable = 0;
-	r4.field.limit = r3.field.base + args->num_dir_credits - 1;
-
-	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_LIM(pool->id), r4.val);
-
-	r5.field.push_ptr = r3.field.base;
-	r5.field.generation = 1;
-
-	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_PUSH_PTR(pool->id), r5.val);
-
-	r6.field.pop_ptr = r3.field.base;
-	r6.field.generation = 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_POP_PTR(pool->id), r6.val);
-
-	r0.field.pool_enabled = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_POOL_ENBLD(pool->id), r0.val);
-
-	pool->avail_credits = args->num_dir_credits;
-	pool->total_credits = args->num_dir_credits;
-	domain->dqed_freelist.offset += args->num_dir_credits;
-
-	pool->configured = true;
-}
-
-static int
-dlb_verify_create_dir_pool_args(struct dlb_hw *hw,
-				u32 domain_id,
-				struct dlb_create_dir_pool_args *args,
-				struct dlb_cmd_response *resp)
-{
-	struct dlb_freelist *dqed_freelist;
-	struct dlb_domain *domain;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	dqed_freelist = &domain->dqed_freelist;
-
-	if (dlb_freelist_count(dqed_freelist) < args->num_dir_credits) {
-		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (dlb_list_empty(&domain->avail_dir_credit_pools)) {
-		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (domain->started) {
-		resp->status = DLB_ST_DOMAIN_STARTED;
-		return -1;
-	}
-
-	return 0;
-}
-
-static void
-dlb_log_create_dir_pool_args(struct dlb_hw *hw,
-			     u32 domain_id,
-			     struct dlb_create_dir_pool_args *args)
-{
-	DLB_HW_INFO(hw, "DLB create directed credit pool arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID:             %d\n", domain_id);
-	DLB_HW_INFO(hw, "\tNumber of DIR credits: %d\n",
-		    args->num_dir_credits);
-}
-
-/**
- * dlb_hw_create_dir_pool() - Allocate and initialize a DLB directed credit
- *	pool.
- * @hw:	  Contains the current state of the DLB hardware.
- * @domain_id: Domain ID.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_create_dir_pool(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_dir_pool_args *args,
-			   struct dlb_cmd_response *resp)
-{
-	struct dlb_credit_pool *pool;
-	struct dlb_domain *domain;
-
-	dlb_log_create_dir_pool_args(hw, domain_id, args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	/* At least one available pool */
-	if (dlb_verify_create_dir_pool_args(hw, domain_id, args, resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	pool = DLB_DOM_LIST_HEAD(domain->avail_dir_credit_pools, typeof(*pool));
-
-	/* Verification should catch this. */
-	if (pool == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available dir credit pools\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	dlb_configure_dir_credit_pool(hw, domain, args, pool);
-
-	/* Configuration succeeded, so move the resource from the 'avail' to
-	 * the 'used' list.
-	 */
-	dlb_list_del(&domain->avail_dir_credit_pools, &pool->domain_list);
-
-	dlb_list_add(&domain->used_dir_credit_pools, &pool->domain_list);
-
-	resp->status = 0;
-	resp->id = pool->id;
-
-	return 0;
-}
-
-static u32 dlb_ldb_cq_inflight_count(struct dlb_hw *hw,
-				     struct dlb_ldb_port *port)
-{
-	union dlb_lsp_cq_ldb_infl_cnt r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));
-
-	return r0.field.count;
-}
-
-static u32 dlb_ldb_cq_token_count(struct dlb_hw *hw,
-				  struct dlb_ldb_port *port)
-{
-	union dlb_lsp_cq_ldb_tkn_cnt r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_TKN_CNT(port->id));
-
-	return r0.field.token_count;
-}
-
-static int dlb_drain_ldb_cq(struct dlb_hw *hw, struct dlb_ldb_port *port)
-{
-	u32 infl_cnt, tkn_cnt;
-	unsigned int i;
-
-	infl_cnt = dlb_ldb_cq_inflight_count(hw, port);
-
-	/* Account for the initial token count, which is used to provide a CQ
-	 * with a depth of less than 8.
-	 */
-	tkn_cnt = dlb_ldb_cq_token_count(hw, port) - port->init_tkn_cnt;
-
-	if (infl_cnt || tkn_cnt) {
-		struct dlb_hcw hcw_mem[8], *hcw;
-		void  *pp_addr;
-
-		pp_addr = os_map_producer_port(hw, port->id, true);
-
-		/* Point hcw to a 64B-aligned location */
-		hcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
-
-		/* Program the first HCW for a completion and token return, and
-		 * the other HCWs as NOOPs.
-		 */
-
-		memset(hcw, 0, 4 * sizeof(*hcw));
-		hcw->qe_comp = (infl_cnt > 0);
-		hcw->cq_token = (tkn_cnt > 0);
-		hcw->lock_id = tkn_cnt - 1;
-
-		/* Return tokens in the first HCW */
-		dlb_movdir64b(pp_addr, hcw);
-
-		hcw->cq_token = 0;
-
-		/* Issue remaining completions (if any) */
-		for (i = 1; i < infl_cnt; i++)
-			dlb_movdir64b(pp_addr, hcw);
-
-		os_fence_hcw(hw, pp_addr);
-
-		os_unmap_producer_port(hw, pp_addr);
-	}
-
-	return 0;
-}
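
One detail worth noting in the drain path above is how it obtains a 64-byte-aligned HCW block from the stack: it over-allocates an array of eight 16-byte HCWs and rounds a pointer into the middle of it down to a 64-byte boundary, which always leaves 64 aligned bytes inside the array. A stand-alone sketch of that trick follows; struct hcw is a stand-in for struct dlb_hcw, and no device access is modeled.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hcw {
	uint8_t bytes[16];	/* stand-in for the 16B struct dlb_hcw */
};

int main(void)
{
	struct hcw hcw_mem[8], *hcw;

	/* &hcw_mem[4] sits at least 64 bytes into the 128-byte array, so
	 * clearing the low six address bits cannot move the pointer below
	 * hcw_mem[0], and 4 * 16 = 64 aligned bytes remain inside the array.
	 */
	hcw = (struct hcw *)((uintptr_t)&hcw_mem[4] & ~(uintptr_t)0x3F);

	memset(hcw, 0, 4 * sizeof(*hcw));	/* the 64B the device would read */

	printf("aligned HCW block at %p (addr %% 64 = %u)\n",
	       (void *)hcw, (unsigned int)((uintptr_t)hcw % 64));
	return 0;
}
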
-
-static int dlb_domain_drain_ldb_cqs(struct dlb_hw *hw,
-				    struct dlb_domain *domain,
-				    bool toggle_port)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-	int ret;
-
-	/* If the domain hasn't been started, there's no traffic to drain */
-	if (!domain->started)
-		return 0;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		if (toggle_port)
-			dlb_ldb_port_cq_disable(hw, port);
-
-		ret = dlb_drain_ldb_cq(hw, port);
-		if (ret < 0)
-			return ret;
-
-		if (toggle_port)
-			dlb_ldb_port_cq_enable(hw, port);
-	}
-
-	return 0;
-}
-
-static void dlb_domain_disable_ldb_queue_write_perms(struct dlb_hw *hw,
-						     struct dlb_domain *domain)
-{
-	int domain_offset = domain->id * DLB_MAX_NUM_LDB_QUEUES;
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_sys_ldb_vasqid_v r0;
-	struct dlb_ldb_queue *queue;
-
-	r0.field.vasqid_v = 0;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		int idx = domain_offset + queue->id;
-
-		DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(idx), r0.val);
-	}
-}
-
-static void dlb_domain_disable_ldb_seq_checks(struct dlb_hw *hw,
-					      struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_chp_sn_chk_enbl r1;
-	struct dlb_ldb_port *port;
-
-	r1.field.en = 0;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
-		DLB_CSR_WR(hw,
-			   DLB_CHP_SN_CHK_ENBL(port->id),
-			   r1.val);
-}
-
-static void dlb_domain_disable_ldb_port_crd_updates(struct dlb_hw *hw,
-						    struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_chp_ldb_pp_crd_req_state r0;
-	struct dlb_ldb_port *port;
-
-	r0.field.no_pp_credit_update = 1;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
-		DLB_CSR_WR(hw,
-			   DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),
-			   r0.val);
-}
-
-static void dlb_domain_disable_ldb_port_interrupts(struct dlb_hw *hw,
-						   struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_chp_ldb_cq_int_enb r0 = { {0} };
-	union dlb_chp_ldb_cq_wd_enb r1 = { {0} };
-	struct dlb_ldb_port *port;
-
-	r0.field.en_tim = 0;
-	r0.field.en_depth = 0;
-
-	r1.field.wd_enable = 0;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		DLB_CSR_WR(hw,
-			   DLB_CHP_LDB_CQ_INT_ENB(port->id),
-			   r0.val);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_LDB_CQ_WD_ENB(port->id),
-			   r1.val);
-	}
-}
-
-static void dlb_domain_disable_dir_queue_write_perms(struct dlb_hw *hw,
-						     struct dlb_domain *domain)
-{
-	int domain_offset = domain->id * DLB_MAX_NUM_DIR_PORTS;
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_sys_dir_vasqid_v r0;
-	struct dlb_dir_pq_pair *port;
-
-	r0.field.vasqid_v = 0;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
-		int idx = domain_offset + port->id;
-
-		DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(idx), r0.val);
-	}
-}
-
-static void dlb_domain_disable_dir_port_interrupts(struct dlb_hw *hw,
-						   struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_chp_dir_cq_int_enb r0 = { {0} };
-	union dlb_chp_dir_cq_wd_enb r1 = { {0} };
-	struct dlb_dir_pq_pair *port;
-
-	r0.field.en_tim = 0;
-	r0.field.en_depth = 0;
-
-	r1.field.wd_enable = 0;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DIR_CQ_INT_ENB(port->id),
-			   r0.val);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DIR_CQ_WD_ENB(port->id),
-			   r1.val);
-	}
-}
-
-static void dlb_domain_disable_dir_port_crd_updates(struct dlb_hw *hw,
-						    struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_chp_dir_pp_crd_req_state r0;
-	struct dlb_dir_pq_pair *port;
-
-	r0.field.no_pp_credit_update = 1;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
-			   r0.val);
-}
-
-static void dlb_domain_disable_dir_cqs(struct dlb_hw *hw,
-				       struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_dir_pq_pair *port;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
-		port->enabled = false;
-
-		dlb_dir_port_cq_disable(hw, port);
-	}
-}
-
-static void dlb_domain_disable_ldb_cqs(struct dlb_hw *hw,
-				       struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		port->enabled = false;
-
-		dlb_ldb_port_cq_disable(hw, port);
-	}
-}
-
-static void dlb_domain_enable_ldb_cqs(struct dlb_hw *hw,
-				      struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		port->enabled = true;
-
-		dlb_ldb_port_cq_enable(hw, port);
-	}
-}
-
-static struct dlb_ldb_queue *dlb_get_ldb_queue_from_id(struct dlb_hw *hw,
-						       u32 id)
-{
-	if (id >= DLB_MAX_NUM_LDB_QUEUES)
-		return NULL;
-
-	return &hw->rsrcs.ldb_queues[id];
-}
-
-static void dlb_ldb_port_clear_has_work_bits(struct dlb_hw *hw,
-					     struct dlb_ldb_port *port,
-					     u8 slot)
-{
-	union dlb_lsp_ldb_sched_ctrl r2 = { {0} };
-
-	r2.field.cq = port->id;
-	r2.field.qidix = slot;
-	r2.field.value = 0;
-	r2.field.rlist_haswork_v = 1;
-
-	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
-
-	memset(&r2, 0, sizeof(r2));
-
-	r2.field.cq = port->id;
-	r2.field.qidix = slot;
-	r2.field.value = 0;
-	r2.field.nalb_haswork_v = 1;
-
-	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
-
-	dlb_flush_csr(hw);
-}
-
-static void dlb_domain_finish_map_port(struct dlb_hw *hw,
-				       struct dlb_domain *domain,
-				       struct dlb_ldb_port *port)
-{
-	int i;
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		union dlb_lsp_qid_ldb_infl_cnt r0;
-		struct dlb_ldb_queue *queue;
-		int qid;
-
-		if (port->qid_map[i].state != DLB_QUEUE_MAP_IN_PROGRESS)
-			continue;
-
-		qid = port->qid_map[i].qid;
-
-		queue = dlb_get_ldb_queue_from_id(hw, qid);
-
-		if (queue == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: unable to find queue %d\n",
-				   __func__, qid);
-			continue;
-		}
-
-		r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));
-
-		if (r0.field.count)
-			continue;
-
-		/* Disable the affected CQ, and the CQs already mapped to the
-		 * QID, before reading the QID's inflight count a second time.
-		 * There is an unlikely race in which the QID may schedule one
-		 * more QE after we read an inflight count of 0, and disabling
-		 * the CQs guarantees that the race will not occur after a
-		 * re-read of the inflight count register.
-		 */
-		if (port->enabled)
-			dlb_ldb_port_cq_disable(hw, port);
-
-		dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
-
-		r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));
-
-		if (r0.field.count) {
-			if (port->enabled)
-				dlb_ldb_port_cq_enable(hw, port);
-
-			dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
-
-			continue;
-		}
-
-		dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
-	}
-}
-
-static unsigned int
-dlb_domain_finish_map_qid_procedures(struct dlb_hw *hw,
-				     struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-
-	if (!domain->configured || domain->num_pending_additions == 0)
-		return 0;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
-		dlb_domain_finish_map_port(hw, domain, port);
-
-	return domain->num_pending_additions;
-}
-
-unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw)
-{
-	int i, num = 0;
-
-	/* Finish queue map jobs for any domain that needs it */
-	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
-		struct dlb_domain *domain = &hw->domains[i];
-
-		num += dlb_domain_finish_map_qid_procedures(hw, domain);
-	}
-
-	return num;
-}
-
-static int dlb_domain_wait_for_ldb_cqs_to_empty(struct dlb_hw *hw,
-						struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		int i;
-
-		for (i = 0; i < DLB_MAX_CQ_COMP_CHECK_LOOPS; i++) {
-			if (dlb_ldb_cq_inflight_count(hw, port) == 0)
-				break;
-		}
-
-		if (i == DLB_MAX_CQ_COMP_CHECK_LOOPS) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
-				   __func__, port->id);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void dlb_domain_finish_unmap_port_slot(struct dlb_hw *hw,
-					      struct dlb_domain *domain,
-					      struct dlb_ldb_port *port,
-					      int slot)
-{
-	enum dlb_qid_map_state state;
-	struct dlb_ldb_queue *queue;
-
-	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
-
-	state = port->qid_map[slot].state;
-
-	/* Update the QID2CQIDX and CQ2QID vectors */
-	dlb_ldb_port_unmap_qid(hw, port, queue);
-
-	/* Ensure the QID will not be serviced by this {CQ, slot} by clearing
-	 * the has_work bits
-	 */
-	dlb_ldb_port_clear_has_work_bits(hw, port, slot);
-
-	/* Reset the {CQ, slot} to its default state */
-	dlb_ldb_port_set_queue_if_status(hw, port, slot);
-
-	/* Re-enable the CQ if it was not manually disabled by the user */
-	if (port->enabled)
-		dlb_ldb_port_cq_enable(hw, port);
-
-	/* If there is a mapping that is pending this slot's removal, perform
-	 * the mapping now.
-	 */
-	if (state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP) {
-		struct dlb_ldb_port_qid_map *map;
-		struct dlb_ldb_queue *map_queue;
-		u8 prio;
-
-		map = &port->qid_map[slot];
-
-		map->qid = map->pending_qid;
-		map->priority = map->pending_priority;
-
-		map_queue = &hw->rsrcs.ldb_queues[map->qid];
-		prio = map->priority;
-
-		dlb_ldb_port_map_qid(hw, domain, port, map_queue, prio);
-	}
-}
-
-static bool dlb_domain_finish_unmap_port(struct dlb_hw *hw,
-					 struct dlb_domain *domain,
-					 struct dlb_ldb_port *port)
-{
-	union dlb_lsp_cq_ldb_infl_cnt r0;
-	int i;
-
-	if (port->num_pending_removals == 0)
-		return false;
-
-	/* The unmap requires all the CQ's outstanding inflights to be
-	 * completed.
-	 */
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));
-	if (r0.field.count > 0)
-		return false;
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		struct dlb_ldb_port_qid_map *map;
-
-		map = &port->qid_map[i];
-
-		if (map->state != DLB_QUEUE_UNMAP_IN_PROGRESS &&
-		    map->state != DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP)
-			continue;
-
-		dlb_domain_finish_unmap_port_slot(hw, domain, port, i);
-	}
-
-	return true;
-}
-
-static unsigned int
-dlb_domain_finish_unmap_qid_procedures(struct dlb_hw *hw,
-				       struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-
-	if (!domain->configured || domain->num_pending_removals == 0)
-		return 0;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
-		dlb_domain_finish_unmap_port(hw, domain, port);
-
-	return domain->num_pending_removals;
-}
-
-unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw)
-{
-	int i, num = 0;
-
-	/* Finish queue unmap jobs for any domain that needs it */
-	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
-		struct dlb_domain *domain = &hw->domains[i];
-
-		num += dlb_domain_finish_unmap_qid_procedures(hw, domain);
-	}
-
-	return num;
-}
-
-/* Returns whether the queue is empty, including its inflight and replay
- * counts.
- */
-static bool dlb_ldb_queue_is_empty(struct dlb_hw *hw,
-				   struct dlb_ldb_queue *queue)
-{
-	union dlb_lsp_qid_ldb_replay_cnt r0;
-	union dlb_lsp_qid_aqed_active_cnt r1;
-	union dlb_lsp_qid_atq_enqueue_cnt r2;
-	union dlb_lsp_qid_ldb_enqueue_cnt r3;
-	union dlb_lsp_qid_ldb_infl_cnt r4;
-
-	r0.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_LDB_REPLAY_CNT(queue->id));
-	if (r0.val)
-		return false;
-
-	r1.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
-	if (r1.val)
-		return false;
-
-	r2.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));
-	if (r2.val)
-		return false;
-
-	r3.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
-	if (r3.val)
-		return false;
-
-	r4.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_LDB_INFL_CNT(queue->id));
-	if (r4.val)
-		return false;
-
-	return true;
-}
-
-static bool dlb_domain_mapped_queues_empty(struct dlb_hw *hw,
-					   struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_queue *queue;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		if (queue->num_mappings == 0)
-			continue;
-
-		if (!dlb_ldb_queue_is_empty(hw, queue))
-			return false;
-	}
-
-	return true;
-}
-
-static int dlb_domain_drain_mapped_queues(struct dlb_hw *hw,
-					  struct dlb_domain *domain)
-{
-	int i, ret;
-
-	/* If the domain hasn't been started, there's no traffic to drain */
-	if (!domain->started)
-		return 0;
-
-	if (domain->num_pending_removals > 0) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: failed to unmap domain queues\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
-		ret = dlb_domain_drain_ldb_cqs(hw, domain, true);
-		if (ret < 0)
-			return ret;
-
-		if (dlb_domain_mapped_queues_empty(hw, domain))
-			break;
-	}
-
-	if (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: failed to empty queues\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	/* Drain the CQs one more time. For the queues to have gone empty, they
-	 * must have scheduled one or more QEs into the CQs, so drain those too.
-	 */
-	ret = dlb_domain_drain_ldb_cqs(hw, domain, true);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static int dlb_domain_drain_unmapped_queue(struct dlb_hw *hw,
-					   struct dlb_domain *domain,
-					   struct dlb_ldb_queue *queue)
-{
-	struct dlb_ldb_port *port;
-	int ret;
-
-	/* If a domain has LDB queues, it must have LDB ports */
-	if (dlb_list_empty(&domain->used_ldb_ports)) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: No configured LDB ports\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	port = DLB_DOM_LIST_HEAD(domain->used_ldb_ports, typeof(*port));
-
-	/* If necessary, free up a QID slot in this CQ */
-	if (port->num_mappings == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-		struct dlb_ldb_queue *mapped_queue;
-
-		mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
-
-		ret = dlb_ldb_port_unmap_qid(hw, port, mapped_queue);
-		if (ret)
-			return ret;
-	}
-
-	ret = dlb_ldb_port_map_qid_dynamic(hw, port, queue, 0);
-	if (ret)
-		return ret;
-
-	return dlb_domain_drain_mapped_queues(hw, domain);
-}
-
-static int dlb_domain_drain_unmapped_queues(struct dlb_hw *hw,
-					    struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_queue *queue;
-	int ret;
-
-	/* If the domain hasn't been started, there's no traffic to drain */
-	if (!domain->started)
-		return 0;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		if (queue->num_mappings != 0 ||
-		    dlb_ldb_queue_is_empty(hw, queue))
-			continue;
-
-		ret = dlb_domain_drain_unmapped_queue(hw, domain, queue);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int dlb_domain_wait_for_ldb_pool_refill(struct dlb_hw *hw,
-					       struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_credit_pool *pool;
-
-	/* Confirm that all credits are returned to the domain's credit pools */
-	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
-		union dlb_chp_qed_fl_push_ptr r0;
-		union dlb_chp_qed_fl_pop_ptr r1;
-		unsigned long pop_offs, push_offs;
-		int i;
-
-		push_offs = DLB_CHP_QED_FL_PUSH_PTR(pool->id);
-		pop_offs = DLB_CHP_QED_FL_POP_PTR(pool->id);
-
-		for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
-			r0.val = DLB_CSR_RD(hw, push_offs);
-
-			r1.val = DLB_CSR_RD(hw, pop_offs);
-
-			/* Break early if the freelist is replenished */
-			if (r1.field.pop_ptr == r0.field.push_ptr &&
-			    r1.field.generation != r0.field.generation) {
-				break;
-			}
-		}
-
-		/* Error if the freelist is not full */
-		if (r1.field.pop_ptr != r0.field.push_ptr ||
-		    r1.field.generation == r0.field.generation) {
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static int dlb_domain_wait_for_dir_pool_refill(struct dlb_hw *hw,
-					       struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_credit_pool *pool;
-
-	/* Confirm that all credits are returned to the domain's credit pools */
-	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
-		union dlb_chp_dqed_fl_push_ptr r0;
-		union dlb_chp_dqed_fl_pop_ptr r1;
-		unsigned long pop_offs, push_offs;
-		int i;
-
-		push_offs = DLB_CHP_DQED_FL_PUSH_PTR(pool->id);
-		pop_offs = DLB_CHP_DQED_FL_POP_PTR(pool->id);
-
-		for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
-			r0.val = DLB_CSR_RD(hw, push_offs);
-
-			r1.val = DLB_CSR_RD(hw, pop_offs);
-
-			/* Break early if the freelist is replenished */
-			if (r1.field.pop_ptr == r0.field.push_ptr &&
-			    r1.field.generation != r0.field.generation) {
-				break;
-			}
-		}
-
-		/* Error if the freelist is not full */
-		if (r1.field.pop_ptr != r0.field.push_ptr ||
-		    r1.field.generation == r0.field.generation) {
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
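
Both refill-wait helpers above decide that a credit freelist has been replenished from the push/pop pointers plus their generation (wrap) bits: equal pointers with differing generation bits mean the freelist is full, equal pointers with equal generation bits mean it is empty. The sketch below shows that convention with illustrative names rather than the driver's register layouts.

#include <stdbool.h>
#include <stdio.h>

struct fl_ptr {
	unsigned int ptr;		/* freelist push or pop pointer */
	unsigned int generation;	/* toggles each time the pointer wraps */
};

static bool freelist_full(struct fl_ptr push, struct fl_ptr pop)
{
	return pop.ptr == push.ptr && pop.generation != push.generation;
}

static bool freelist_empty(struct fl_ptr push, struct fl_ptr pop)
{
	return pop.ptr == push.ptr && pop.generation == push.generation;
}

int main(void)
{
	/* Mirrors the pool-configure defaults above: equal pointers,
	 * generations 1 (push) and 0 (pop), i.e. every credit is present.
	 */
	struct fl_ptr push = { .ptr = 0, .generation = 1 };
	struct fl_ptr pop = { .ptr = 0, .generation = 0 };

	printf("full=%d empty=%d\n", freelist_full(push, pop),
	       freelist_empty(push, pop));
	return 0;
}
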
-
-static u32 dlb_dir_queue_depth(struct dlb_hw *hw,
-			       struct dlb_dir_pq_pair *queue)
-{
-	union dlb_lsp_qid_dir_enqueue_cnt r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_DIR_ENQUEUE_CNT(queue->id));
-
-	return r0.field.count;
-}
-
-static bool dlb_dir_queue_is_empty(struct dlb_hw *hw,
-				   struct dlb_dir_pq_pair *queue)
-{
-	return dlb_dir_queue_depth(hw, queue) == 0;
-}
-
-static bool dlb_domain_dir_queues_empty(struct dlb_hw *hw,
-					struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_dir_pq_pair *queue;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
-		if (!dlb_dir_queue_is_empty(hw, queue))
-			return false;
-	}
-
-	return true;
-}
-
-static u32 dlb_dir_cq_token_count(struct dlb_hw *hw,
-				  struct dlb_dir_pq_pair *port)
-{
-	union dlb_lsp_cq_dir_tkn_cnt r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_DIR_TKN_CNT(port->id));
-
-	return r0.field.count;
-}
-
-static void dlb_drain_dir_cq(struct dlb_hw *hw, struct dlb_dir_pq_pair *port)
-{
-	unsigned int port_id = port->id;
-	u32 cnt;
-
-	/* Return any outstanding tokens */
-	cnt = dlb_dir_cq_token_count(hw, port);
-
-	if (cnt != 0) {
-		struct dlb_hcw hcw_mem[8], *hcw;
-		void  *pp_addr;
-
-		pp_addr = os_map_producer_port(hw, port_id, false);
-
-		/* Point hcw to a 64B-aligned location */
-		hcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
-
-		/* Program the first HCW for a batch token return and
-		 * the rest as NOOPs.
-		 */
-		memset(hcw, 0, 4 * sizeof(*hcw));
-		hcw->cq_token = 1;
-		hcw->lock_id = cnt - 1;
-
-		dlb_movdir64b(pp_addr, hcw);
-
-		os_fence_hcw(hw, pp_addr);
-
-		os_unmap_producer_port(hw, pp_addr);
-	}
-}
-
-static int dlb_domain_drain_dir_cqs(struct dlb_hw *hw,
-				    struct dlb_domain *domain,
-				    bool toggle_port)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_dir_pq_pair *port;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
-		/* Can't drain a port if it's not configured, and there's
-		 * nothing to drain if its queue is unconfigured.
-		 */
-		if (!port->port_configured || !port->queue_configured)
-			continue;
-
-		if (toggle_port)
-			dlb_dir_port_cq_disable(hw, port);
-
-		dlb_drain_dir_cq(hw, port);
-
-		if (toggle_port)
-			dlb_dir_port_cq_enable(hw, port);
-	}
-
-	return 0;
-}
-
-static int dlb_domain_drain_dir_queues(struct dlb_hw *hw,
-				       struct dlb_domain *domain)
-{
-	int i;
-
-	/* If the domain hasn't been started, there's no traffic to drain */
-	if (!domain->started)
-		return 0;
-
-	for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
-		dlb_domain_drain_dir_cqs(hw, domain, true);
-
-		if (dlb_domain_dir_queues_empty(hw, domain))
-			break;
-	}
-
-	if (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: failed to empty queues\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	/* Drain the CQs one more time. For the queues to have gone empty, they
-	 * must have scheduled one or more QEs into the CQs, so drain those too.
-	 */
-	dlb_domain_drain_dir_cqs(hw, domain, true);
-
-	return 0;
-}
-
-static void dlb_domain_disable_dir_producer_ports(struct dlb_hw *hw,
-						  struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_dir_pq_pair *port;
-	union dlb_sys_dir_pp_v r1;
-
-	r1.field.pp_v = 0;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
-		DLB_CSR_WR(hw,
-			   DLB_SYS_DIR_PP_V(port->id),
-			   r1.val);
-}
-
-static void dlb_domain_disable_ldb_producer_ports(struct dlb_hw *hw,
-						  struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_sys_ldb_pp_v r1;
-	struct dlb_ldb_port *port;
-
-	r1.field.pp_v = 0;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
-		DLB_CSR_WR(hw,
-			   DLB_SYS_LDB_PP_V(port->id),
-			   r1.val);
-
-		hw->pf.num_enabled_ldb_ports--;
-	}
-}
-
-static void dlb_domain_disable_dir_pools(struct dlb_hw *hw,
-					 struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_sys_dir_pool_enbld r0 = { {0} };
-	struct dlb_credit_pool *pool;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
-		DLB_CSR_WR(hw,
-			   DLB_SYS_DIR_POOL_ENBLD(pool->id),
-			   r0.val);
-}
-
-static void dlb_domain_disable_ldb_pools(struct dlb_hw *hw,
-					 struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	union dlb_sys_ldb_pool_enbld r0 = { {0} };
-	struct dlb_credit_pool *pool;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
-		DLB_CSR_WR(hw,
-			   DLB_SYS_LDB_POOL_ENBLD(pool->id),
-			   r0.val);
-}
-
-static int dlb_reset_hw_resource(struct dlb_hw *hw, int type, int id)
-{
-	union dlb_cfg_mstr_diag_reset_sts r0 = { {0} };
-	union dlb_cfg_mstr_bcast_reset_vf_start r1 = { {0} };
-	int i;
-
-	r1.field.vf_reset_start = 1;
-
-	r1.field.vf_reset_type = type;
-	r1.field.vf_reset_id = id;
-
-	DLB_CSR_WR(hw, DLB_CFG_MSTR_BCAST_RESET_VF_START, r1.val);
-
-	/* Wait for hardware to complete. This is a finite-time operation,
-	 * but set a loop bound just in case.
-	 */
-	for (i = 0; i < 1024 * 1024; i++) {
-		r0.val = DLB_CSR_RD(hw, DLB_CFG_MSTR_DIAG_RESET_STS);
-
-		if (r0.field.chp_vf_reset_done &&
-		    r0.field.rop_vf_reset_done &&
-		    r0.field.lsp_vf_reset_done &&
-		    r0.field.nalb_vf_reset_done &&
-		    r0.field.ap_vf_reset_done &&
-		    r0.field.dp_vf_reset_done &&
-		    r0.field.qed_vf_reset_done &&
-		    r0.field.dqed_vf_reset_done &&
-		    r0.field.aqed_vf_reset_done)
-			return 0;
-
-		os_udelay(1);
-	}
-
-	return -ETIMEDOUT;
-}
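
dlb_reset_hw_resource() polls the reset-status register with a bounded loop rather than spinning indefinitely, returning -ETIMEDOUT if the hardware never reports completion. A minimal sketch of that bounded-polling pattern, with poll_done() and delay_us() as hypothetical stand-ins for the status read and os_udelay():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool poll_done(void)		{ return true; }	/* stub: HW finished */
static void delay_us(unsigned int us)	{ (void)us; }		/* stub for os_udelay() */

static int wait_for_hw(unsigned int max_iters)
{
	unsigned int i;

	for (i = 0; i < max_iters; i++) {
		if (poll_done())
			return 0;	/* hardware reported completion */

		delay_us(1);
	}

	return -ETIMEDOUT;		/* bounded: give up rather than hang */
}

int main(void)
{
	printf("wait_for_hw: %d\n", wait_for_hw(1024 * 1024));
	return 0;
}
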
-
-static int dlb_domain_reset_hw_resources(struct dlb_hw *hw,
-					 struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_dir_pq_pair *dir_port;
-	struct dlb_ldb_queue *ldb_queue;
-	struct dlb_ldb_port *ldb_port;
-	struct dlb_credit_pool *pool;
-	int ret;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
-		ret = dlb_reset_hw_resource(hw,
-					    VF_RST_TYPE_POOL_LDB,
-					    pool->id);
-		if (ret)
-			return ret;
-	}
-
-	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
-		ret = dlb_reset_hw_resource(hw,
-					    VF_RST_TYPE_POOL_DIR,
-					    pool->id);
-		if (ret)
-			return ret;
-	}
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
-		ret = dlb_reset_hw_resource(hw,
-					    VF_RST_TYPE_QID_LDB,
-					    ldb_queue->id);
-		if (ret)
-			return ret;
-	}
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
-		ret = dlb_reset_hw_resource(hw,
-					    VF_RST_TYPE_QID_DIR,
-					    dir_port->id);
-		if (ret)
-			return ret;
-	}
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {
-		ret = dlb_reset_hw_resource(hw,
-					    VF_RST_TYPE_CQ_LDB,
-					    ldb_port->id);
-		if (ret)
-			return ret;
-	}
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
-		ret = dlb_reset_hw_resource(hw,
-					    VF_RST_TYPE_CQ_DIR,
-					    dir_port->id);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int dlb_domain_verify_reset_success(struct dlb_hw *hw,
-					   struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_dir_pq_pair *dir_port;
-	struct dlb_ldb_port *ldb_port;
-	struct dlb_credit_pool *pool;
-	struct dlb_ldb_queue *queue;
-
-	/* Confirm that all credits are returned to the domain's credit pools */
-	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
-		union dlb_chp_dqed_fl_pop_ptr r0;
-		union dlb_chp_dqed_fl_push_ptr r1;
-
-		r0.val = DLB_CSR_RD(hw,
-				    DLB_CHP_DQED_FL_POP_PTR(pool->id));
-
-		r1.val = DLB_CSR_RD(hw,
-				    DLB_CHP_DQED_FL_PUSH_PTR(pool->id));
-
-		if (r0.field.pop_ptr != r1.field.push_ptr ||
-		    r0.field.generation == r1.field.generation) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: failed to refill directed pool %d's credits.\n",
-				   __func__, pool->id);
-			return -EFAULT;
-		}
-	}
-
-	/* Confirm that all the domain's queues' inflight counts and AQED
-	 * active counts are 0.
-	 */
-	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		if (!dlb_ldb_queue_is_empty(hw, queue)) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: failed to empty ldb queue %d\n",
-				   __func__, queue->id);
-			return -EFAULT;
-		}
-	}
-
-	/* Confirm that all the domain's CQs' inflight and token counts are 0. */
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {
-		if (dlb_ldb_cq_inflight_count(hw, ldb_port) ||
-		    dlb_ldb_cq_token_count(hw, ldb_port)) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: failed to empty ldb port %d\n",
-				   __func__, ldb_port->id);
-			return -EFAULT;
-		}
-	}
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
-		if (!dlb_dir_queue_is_empty(hw, dir_port)) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: failed to empty dir queue %d\n",
-				   __func__, dir_port->id);
-			return -EFAULT;
-		}
-
-		if (dlb_dir_cq_token_count(hw, dir_port)) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: failed to empty dir port %d\n",
-				   __func__, dir_port->id);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void __dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,
-						  struct dlb_ldb_port *port)
-{
-	union dlb_chp_ldb_pp_state_reset r0 = { {0} };
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),
-		   DLB_CHP_LDB_PP_CRD_REQ_STATE_RST);
-
-	/* Reset the port's load-balanced and directed credit state */
-	r0.field.dir_type = 0;
-	r0.field.reset_pp_state = 1;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_STATE_RESET(port->id),
-		   r0.val);
-
-	r0.field.dir_type = 1;
-	r0.field.reset_pp_state = 1;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_STATE_RESET(port->id),
-		   r0.val);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id),
-		   DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id),
-		   DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
-		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id),
-		   DLB_CHP_LDB_PP_LDB_CRD_LWM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id),
-		   DLB_CHP_LDB_PP_LDB_CRD_HWM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_LDB_PP2POOL(port->id),
-		   DLB_CHP_LDB_LDB_PP2POOL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
-		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id),
-		   DLB_CHP_LDB_PP_DIR_CRD_LWM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id),
-		   DLB_CHP_LDB_PP_DIR_CRD_HWM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_DIR_PP2POOL(port->id),
-		   DLB_CHP_LDB_DIR_PP2POOL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_PP2LDBPOOL(port->id),
-		   DLB_SYS_LDB_PP2LDBPOOL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_PP2DIRPOOL(port->id),
-		   DLB_SYS_LDB_PP2DIRPOOL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_HIST_LIST_LIM(port->id),
-		   DLB_CHP_HIST_LIST_LIM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_HIST_LIST_BASE(port->id),
-		   DLB_CHP_HIST_LIST_BASE_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_HIST_LIST_POP_PTR(port->id),
-		   DLB_CHP_HIST_LIST_POP_PTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_HIST_LIST_PUSH_PTR(port->id),
-		   DLB_CHP_HIST_LIST_PUSH_PTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_CQ_WPTR(port->id),
-		   DLB_CHP_LDB_CQ_WPTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id),
-		   DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_CQ_TMR_THRESHOLD(port->id),
-		   DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_CQ_INT_ENB(port->id),
-		   DLB_CHP_LDB_CQ_INT_ENB_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ_LDB_INFL_LIM(port->id),
-		   DLB_LSP_CQ_LDB_INFL_LIM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ2PRIOV(port->id),
-		   DLB_LSP_CQ2PRIOV_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(port->id),
-		   DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
-		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
-		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ_LDB_DSBL(port->id),
-		   DLB_LSP_CQ_LDB_DSBL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_CQ2VF_PF(port->id),
-		   DLB_SYS_LDB_CQ2VF_PF_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_PP2VF_PF(port->id),
-		   DLB_SYS_LDB_PP2VF_PF_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_CQ_ADDR_L(port->id),
-		   DLB_SYS_LDB_CQ_ADDR_L_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_CQ_ADDR_U(port->id),
-		   DLB_SYS_LDB_CQ_ADDR_U_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_PP_ADDR_L(port->id),
-		   DLB_SYS_LDB_PP_ADDR_L_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_PP_ADDR_U(port->id),
-		   DLB_SYS_LDB_PP_ADDR_U_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_PP_V(port->id),
-		   DLB_SYS_LDB_PP_V_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_PP2VAS(port->id),
-		   DLB_SYS_LDB_PP2VAS_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_CQ_ISR(port->id),
-		   DLB_SYS_LDB_CQ_ISR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_WBUF_LDB_FLAGS(port->id),
-		   DLB_SYS_WBUF_LDB_FLAGS_RST);
-}
-
-static void __dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,
-						  struct dlb_dir_pq_pair *port)
-{
-	union dlb_chp_dir_pp_state_reset r0 = { {0} };
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
-		   DLB_CHP_DIR_PP_CRD_REQ_STATE_RST);
-
-	/* Reset the port's load-balanced and directed credit state */
-	r0.field.dir_type = 0;
-	r0.field.reset_pp_state = 1;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_STATE_RESET(port->id),
-		   r0.val);
-
-	r0.field.dir_type = 1;
-	r0.field.reset_pp_state = 1;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_STATE_RESET(port->id),
-		   r0.val);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
-		   DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
-		   DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
-		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
-		   DLB_CHP_DIR_PP_LDB_CRD_LWM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
-		   DLB_CHP_DIR_PP_LDB_CRD_HWM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_LDB_PP2POOL(port->id),
-		   DLB_CHP_DIR_LDB_PP2POOL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
-		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
-		   DLB_CHP_DIR_PP_DIR_CRD_LWM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
-		   DLB_CHP_DIR_PP_DIR_CRD_HWM_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_DIR_PP2POOL(port->id),
-		   DLB_CHP_DIR_DIR_PP2POOL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP2LDBPOOL(port->id),
-		   DLB_SYS_DIR_PP2LDBPOOL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP2DIRPOOL(port->id),
-		   DLB_SYS_DIR_PP2DIRPOOL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_CQ_WPTR(port->id),
-		   DLB_CHP_DIR_CQ_WPTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
-		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
-		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ_DIR_DSBL(port->id),
-		   DLB_LSP_CQ_DIR_DSBL_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_CQ_WPTR(port->id),
-		   DLB_CHP_DIR_CQ_WPTR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id),
-		   DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_CQ_TMR_THRESHOLD(port->id),
-		   DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_CQ_INT_ENB(port->id),
-		   DLB_CHP_DIR_CQ_INT_ENB_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_CQ2VF_PF(port->id),
-		   DLB_SYS_DIR_CQ2VF_PF_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP2VF_PF(port->id),
-		   DLB_SYS_DIR_PP2VF_PF_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_CQ_ADDR_L(port->id),
-		   DLB_SYS_DIR_CQ_ADDR_L_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_CQ_ADDR_U(port->id),
-		   DLB_SYS_DIR_CQ_ADDR_U_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP_ADDR_L(port->id),
-		   DLB_SYS_DIR_PP_ADDR_L_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP_ADDR_U(port->id),
-		   DLB_SYS_DIR_PP_ADDR_U_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP_V(port->id),
-		   DLB_SYS_DIR_PP_V_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP2VAS(port->id),
-		   DLB_SYS_DIR_PP2VAS_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_CQ_ISR(port->id),
-		   DLB_SYS_DIR_CQ_ISR_RST);
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_WBUF_DIR_FLAGS(port->id),
-		   DLB_SYS_WBUF_DIR_FLAGS_RST);
-}
-
-static void dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,
-						struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_dir_pq_pair *port;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
-		__dlb_domain_reset_dir_port_registers(hw, port);
-}
-
-static void dlb_domain_reset_ldb_queue_registers(struct dlb_hw *hw,
-						 struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_queue *queue;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		DLB_CSR_WR(hw,
-			   DLB_AQED_PIPE_FL_LIM(queue->id),
-			   DLB_AQED_PIPE_FL_LIM_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_AQED_PIPE_FL_BASE(queue->id),
-			   DLB_AQED_PIPE_FL_BASE_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_AQED_PIPE_FL_POP_PTR(queue->id),
-			   DLB_AQED_PIPE_FL_POP_PTR_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_AQED_PIPE_FL_PUSH_PTR(queue->id),
-			   DLB_AQED_PIPE_FL_PUSH_PTR_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_AQED_PIPE_QID_FID_LIM(queue->id),
-			   DLB_AQED_PIPE_QID_FID_LIM_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id),
-			   DLB_LSP_QID_AQED_ACTIVE_LIM_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_LSP_QID_LDB_INFL_LIM(queue->id),
-			   DLB_LSP_QID_LDB_INFL_LIM_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_SYS_LDB_QID_V(queue->id),
-			   DLB_SYS_LDB_QID_V_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_SYS_LDB_QID_V(queue->id),
-			   DLB_SYS_LDB_QID_V_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_ORD_QID_SN(queue->id),
-			   DLB_CHP_ORD_QID_SN_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_ORD_QID_SN_MAP(queue->id),
-			   DLB_CHP_ORD_QID_SN_MAP_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_RO_PIPE_QID2GRPSLT(queue->id),
-			   DLB_RO_PIPE_QID2GRPSLT_RST);
-	}
-}
-
-static void dlb_domain_reset_dir_queue_registers(struct dlb_hw *hw,
-						 struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_dir_pq_pair *queue;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
-		DLB_CSR_WR(hw,
-			   DLB_SYS_DIR_QID_V(queue->id),
-			   DLB_SYS_DIR_QID_V_RST);
-	}
-}
-
-static void dlb_domain_reset_ldb_pool_registers(struct dlb_hw *hw,
-						struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_credit_pool *pool;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
-		DLB_CSR_WR(hw,
-			   DLB_CHP_LDB_POOL_CRD_LIM(pool->id),
-			   DLB_CHP_LDB_POOL_CRD_LIM_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
-			   DLB_CHP_LDB_POOL_CRD_CNT_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_QED_FL_BASE(pool->id),
-			   DLB_CHP_QED_FL_BASE_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_QED_FL_LIM(pool->id),
-			   DLB_CHP_QED_FL_LIM_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_QED_FL_PUSH_PTR(pool->id),
-			   DLB_CHP_QED_FL_PUSH_PTR_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_QED_FL_POP_PTR(pool->id),
-			   DLB_CHP_QED_FL_POP_PTR_RST);
-	}
-}
-
-static void dlb_domain_reset_dir_pool_registers(struct dlb_hw *hw,
-						struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_credit_pool *pool;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DIR_POOL_CRD_LIM(pool->id),
-			   DLB_CHP_DIR_POOL_CRD_LIM_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
-			   DLB_CHP_DIR_POOL_CRD_CNT_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DQED_FL_BASE(pool->id),
-			   DLB_CHP_DQED_FL_BASE_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DQED_FL_LIM(pool->id),
-			   DLB_CHP_DQED_FL_LIM_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DQED_FL_PUSH_PTR(pool->id),
-			   DLB_CHP_DQED_FL_PUSH_PTR_RST);
-
-		DLB_CSR_WR(hw,
-			   DLB_CHP_DQED_FL_POP_PTR(pool->id),
-			   DLB_CHP_DQED_FL_POP_PTR_RST);
-	}
-}
-
-static void dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,
-						struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	RTE_SET_USED(iter);
-	struct dlb_ldb_port *port;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
-		__dlb_domain_reset_ldb_port_registers(hw, port);
-}
-
-static void dlb_domain_reset_registers(struct dlb_hw *hw,
-				       struct dlb_domain *domain)
-{
-	dlb_domain_reset_ldb_port_registers(hw, domain);
-
-	dlb_domain_reset_dir_port_registers(hw, domain);
-
-	dlb_domain_reset_ldb_queue_registers(hw, domain);
-
-	dlb_domain_reset_dir_queue_registers(hw, domain);
-
-	dlb_domain_reset_ldb_pool_registers(hw, domain);
-
-	dlb_domain_reset_dir_pool_registers(hw, domain);
-}
-
-static int dlb_domain_reset_software_state(struct dlb_hw *hw,
-					   struct dlb_domain *domain)
-{
-	struct dlb_ldb_queue *tmp_ldb_queue;
-	RTE_SET_USED(tmp_ldb_queue);
-	struct dlb_dir_pq_pair *tmp_dir_port;
-	RTE_SET_USED(tmp_dir_port);
-	struct dlb_ldb_port *tmp_ldb_port;
-	RTE_SET_USED(tmp_ldb_port);
-	struct dlb_credit_pool *tmp_pool;
-	RTE_SET_USED(tmp_pool);
-	struct dlb_list_entry *iter1;
-	RTE_SET_USED(iter1);
-	struct dlb_list_entry *iter2;
-	RTE_SET_USED(iter2);
-	struct dlb_ldb_queue *ldb_queue;
-	struct dlb_dir_pq_pair *dir_port;
-	struct dlb_ldb_port *ldb_port;
-	struct dlb_credit_pool *pool;
-
-	struct dlb_function_resources *rsrcs;
-	struct dlb_list_head *list;
-	int ret;
-
-	rsrcs = domain->parent_func;
-
-	/* Move the domain's ldb queues to the function's avail list */
-	list = &domain->used_ldb_queues;
-	DLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
-		if (ldb_queue->sn_cfg_valid) {
-			struct dlb_sn_group *grp;
-
-			grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
-
-			dlb_sn_group_free_slot(grp, ldb_queue->sn_slot);
-			ldb_queue->sn_cfg_valid = false;
-		}
-
-		ldb_queue->owned = false;
-		ldb_queue->num_mappings = 0;
-		ldb_queue->num_pending_additions = 0;
-
-		dlb_list_del(&domain->used_ldb_queues, &ldb_queue->domain_list);
-		dlb_list_add(&rsrcs->avail_ldb_queues, &ldb_queue->func_list);
-		rsrcs->num_avail_ldb_queues++;
-	}
-
-	list = &domain->avail_ldb_queues;
-	DLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
-		ldb_queue->owned = false;
-
-		dlb_list_del(&domain->avail_ldb_queues,
-			     &ldb_queue->domain_list);
-		dlb_list_add(&rsrcs->avail_ldb_queues,
-			     &ldb_queue->func_list);
-		rsrcs->num_avail_ldb_queues++;
-	}
-
-	/* Move the domain's ldb ports to the function's avail list */
-	list = &domain->used_ldb_ports;
-	DLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {
-		int i;
-
-		ldb_port->owned = false;
-		ldb_port->configured = false;
-		ldb_port->num_pending_removals = 0;
-		ldb_port->num_mappings = 0;
-		for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
-			ldb_port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
-
-		dlb_list_del(&domain->used_ldb_ports, &ldb_port->domain_list);
-		dlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);
-		rsrcs->num_avail_ldb_ports++;
-	}
-
-	list = &domain->avail_ldb_ports;
-	DLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {
-		ldb_port->owned = false;
-
-		dlb_list_del(&domain->avail_ldb_ports, &ldb_port->domain_list);
-		dlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);
-		rsrcs->num_avail_ldb_ports++;
-	}
-
-	/* Move the domain's dir ports to the function's avail list */
-	list = &domain->used_dir_pq_pairs;
-	DLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
-		dir_port->owned = false;
-		dir_port->port_configured = false;
-
-		dlb_list_del(&domain->used_dir_pq_pairs,
-			     &dir_port->domain_list);
-
-		dlb_list_add(&rsrcs->avail_dir_pq_pairs,
-			     &dir_port->func_list);
-		rsrcs->num_avail_dir_pq_pairs++;
-	}
-
-	list = &domain->avail_dir_pq_pairs;
-	DLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
-		dir_port->owned = false;
-
-		dlb_list_del(&domain->avail_dir_pq_pairs,
-			     &dir_port->domain_list);
-
-		dlb_list_add(&rsrcs->avail_dir_pq_pairs,
-			     &dir_port->func_list);
-		rsrcs->num_avail_dir_pq_pairs++;
-	}
-
-	/* Return hist list entries to the function */
-	ret = dlb_bitmap_set_range(rsrcs->avail_hist_list_entries,
-				   domain->hist_list_entry_base,
-				   domain->total_hist_list_entries);
-	if (ret) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	domain->total_hist_list_entries = 0;
-	domain->avail_hist_list_entries = 0;
-	domain->hist_list_entry_base = 0;
-	domain->hist_list_entry_offset = 0;
-
-	/* Return QED entries to the function */
-	ret = dlb_bitmap_set_range(rsrcs->avail_qed_freelist_entries,
-				   domain->qed_freelist.base,
-				   (domain->qed_freelist.bound -
-					domain->qed_freelist.base));
-	if (ret) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: domain QED base does not match the function's bitmap.\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	domain->qed_freelist.base = 0;
-	domain->qed_freelist.bound = 0;
-	domain->qed_freelist.offset = 0;
-
-	/* Return DQED entries back to the function */
-	ret = dlb_bitmap_set_range(rsrcs->avail_dqed_freelist_entries,
-				   domain->dqed_freelist.base,
-				   (domain->dqed_freelist.bound -
-					domain->dqed_freelist.base));
-	if (ret) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: domain DQED base does not match the function's bitmap.\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	domain->dqed_freelist.base = 0;
-	domain->dqed_freelist.bound = 0;
-	domain->dqed_freelist.offset = 0;
-
-	/* Return AQED entries back to the function */
-	ret = dlb_bitmap_set_range(rsrcs->avail_aqed_freelist_entries,
-				   domain->aqed_freelist.base,
-				   (domain->aqed_freelist.bound -
-					domain->aqed_freelist.base));
-	if (ret) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: domain AQED base does not match the function's bitmap.\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	domain->aqed_freelist.base = 0;
-	domain->aqed_freelist.bound = 0;
-	domain->aqed_freelist.offset = 0;
-
-	/* Return ldb credit pools back to the function's avail list */
-	list = &domain->used_ldb_credit_pools;
-	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
-		pool->owned = false;
-		pool->configured = false;
-
-		dlb_list_del(&domain->used_ldb_credit_pools,
-			     &pool->domain_list);
-		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
-			     &pool->func_list);
-		rsrcs->num_avail_ldb_credit_pools++;
-	}
-
-	list = &domain->avail_ldb_credit_pools;
-	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
-		pool->owned = false;
-
-		dlb_list_del(&domain->avail_ldb_credit_pools,
-			     &pool->domain_list);
-		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
-			     &pool->func_list);
-		rsrcs->num_avail_ldb_credit_pools++;
-	}
-
-	/* Move dir credit pools back to the function */
-	list = &domain->used_dir_credit_pools;
-	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
-		pool->owned = false;
-		pool->configured = false;
-
-		dlb_list_del(&domain->used_dir_credit_pools,
-			     &pool->domain_list);
-		dlb_list_add(&rsrcs->avail_dir_credit_pools,
-			     &pool->func_list);
-		rsrcs->num_avail_dir_credit_pools++;
-	}
-
-	list = &domain->avail_dir_credit_pools;
-	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
-		pool->owned = false;
-
-		dlb_list_del(&domain->avail_dir_credit_pools,
-			     &pool->domain_list);
-		dlb_list_add(&rsrcs->avail_dir_credit_pools,
-			     &pool->func_list);
-		rsrcs->num_avail_dir_credit_pools++;
-	}
-
-	domain->num_pending_removals = 0;
-	domain->num_pending_additions = 0;
-	domain->configured = false;
-	domain->started = false;
-
-	/* Move the domain out of the used_domains list and back to the
-	 * function's avail_domains list.
-	 */
-	dlb_list_del(&rsrcs->used_domains, &domain->func_list);
-	dlb_list_add(&rsrcs->avail_domains, &domain->func_list);
-	rsrcs->num_avail_domains++;
-
-	return 0;
-}
-
-static void dlb_log_reset_domain(struct dlb_hw *hw, u32 domain_id)
-{
-	DLB_HW_INFO(hw, "DLB reset domain:\n");
-	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
-}
-
-/**
- * dlb_reset_domain() - Reset a DLB scheduling domain and its associated
- *	hardware resources.
- * @hw:	  Contains the current state of the DLB hardware.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Note: User software *must* stop sending to this domain's producer ports
- * before invoking this function, otherwise undefined behavior will result.
- *
- * Return: returns < 0 on error, 0 otherwise.
- */
-int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id)
-{
-	struct dlb_domain *domain;
-	int ret;
-
-	dlb_log_reset_domain(hw, domain_id);
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain  == NULL || !domain->configured)
-		return -EINVAL;
-
-	/* For each queue owned by this domain, disable its write permissions to
-	 * cause any traffic sent to it to be dropped. Well-behaved software
-	 * should not be sending QEs at this point.
-	 */
-	dlb_domain_disable_dir_queue_write_perms(hw, domain);
-
-	dlb_domain_disable_ldb_queue_write_perms(hw, domain);
-
-	/* Disable credit updates and turn off completion tracking on all the
-	 * domain's PPs.
-	 */
-	dlb_domain_disable_dir_port_crd_updates(hw, domain);
-
-	dlb_domain_disable_ldb_port_crd_updates(hw, domain);
-
-	dlb_domain_disable_dir_port_interrupts(hw, domain);
-
-	dlb_domain_disable_ldb_port_interrupts(hw, domain);
-
-	dlb_domain_disable_ldb_seq_checks(hw, domain);
-
-	/* Disable the LDB CQs and drain them in order to complete the map and
-	 * unmap procedures, which require zero CQ inflights and zero QID
-	 * inflights respectively.
-	 */
-	dlb_domain_disable_ldb_cqs(hw, domain);
-
-	ret = dlb_domain_drain_ldb_cqs(hw, domain, false);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_domain_wait_for_ldb_cqs_to_empty(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_domain_finish_unmap_qid_procedures(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_domain_finish_map_qid_procedures(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	/* Re-enable the CQs in order to drain the mapped queues. */
-	dlb_domain_enable_ldb_cqs(hw, domain);
-
-	ret = dlb_domain_drain_mapped_queues(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_domain_drain_unmapped_queues(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_domain_wait_for_ldb_pool_refill(hw, domain);
-	if (ret) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: LDB credits failed to refill\n",
-			   __func__);
-		return ret;
-	}
-
-	/* Done draining LDB QEs, so disable the CQs. */
-	dlb_domain_disable_ldb_cqs(hw, domain);
-
-	/* Directed queues are reset in dlb_domain_reset_hw_resources(), but
-	 * that process does not decrement the directed queue size counters used
-	 * by SMON for its average DQED depth measurement. So, we manually drain
-	 * the directed queues here.
-	 */
-	dlb_domain_drain_dir_queues(hw, domain);
-
-	ret = dlb_domain_wait_for_dir_pool_refill(hw, domain);
-	if (ret) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: DIR credits failed to refill\n",
-			   __func__);
-		return ret;
-	}
-
-	/* Done draining DIR QEs, so disable the CQs. */
-	dlb_domain_disable_dir_cqs(hw, domain);
-
-	dlb_domain_disable_dir_producer_ports(hw, domain);
-
-	dlb_domain_disable_ldb_producer_ports(hw, domain);
-
-	dlb_domain_disable_dir_pools(hw, domain);
-
-	dlb_domain_disable_ldb_pools(hw, domain);
-
-	/* Reset the QID, credit pool, and CQ hardware.
-	 *
-	 * Note: DLB 1.0 A0 h/w does not disarm CQ interrupts during sched
-	 * domain reset.
-	 * A spurious interrupt can occur on subsequent use of a reset CQ.
-	 */
-	ret = dlb_domain_reset_hw_resources(hw, domain);
-	if (ret)
-		return ret;
-
-	ret = dlb_domain_verify_reset_success(hw, domain);
-	if (ret)
-		return ret;
-
-	dlb_domain_reset_registers(hw, domain);
-
-	/* Hardware reset complete. Reset the domain's software state */
-	ret = dlb_domain_reset_software_state(hw, domain);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-void dlb_hw_get_num_resources(struct dlb_hw *hw,
-			      struct dlb_get_num_resources_args *arg)
-{
-	struct dlb_function_resources *rsrcs;
-	struct dlb_bitmap *map;
-
-	rsrcs = &hw->pf;
-
-	arg->num_sched_domains = rsrcs->num_avail_domains;
-
-	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
-
-	arg->num_ldb_ports = rsrcs->num_avail_ldb_ports;
-
-	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
-
-	map = rsrcs->avail_aqed_freelist_entries;
-
-	arg->num_atomic_inflights = dlb_bitmap_count(map);
-
-	arg->max_contiguous_atomic_inflights =
-		dlb_bitmap_longest_set_range(map);
-
-	map = rsrcs->avail_hist_list_entries;
-
-	arg->num_hist_list_entries = dlb_bitmap_count(map);
-
-	arg->max_contiguous_hist_list_entries =
-		dlb_bitmap_longest_set_range(map);
-
-	map = rsrcs->avail_qed_freelist_entries;
-
-	arg->num_ldb_credits = dlb_bitmap_count(map);
-
-	arg->max_contiguous_ldb_credits = dlb_bitmap_longest_set_range(map);
-
-	map = rsrcs->avail_dqed_freelist_entries;
-
-	arg->num_dir_credits = dlb_bitmap_count(map);
-
-	arg->max_contiguous_dir_credits = dlb_bitmap_longest_set_range(map);
-
-	arg->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools;
-
-	arg->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools;
-}
-
-void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw)
-{
-	union dlb_sys_sys_alarm_int_enable r0;
-
-	r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
-
-	r0.field.vf_to_pf_isr_pend_error = 0;
-
-	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
-}
-
-static void dlb_configure_ldb_queue(struct dlb_hw *hw,
-				    struct dlb_domain *domain,
-				    struct dlb_ldb_queue *queue,
-				    struct dlb_create_ldb_queue_args *args)
-{
-	union dlb_sys_ldb_vasqid_v r0 = { {0} };
-	union dlb_lsp_qid_ldb_infl_lim r1 = { {0} };
-	union dlb_lsp_qid_aqed_active_lim r2 = { {0} };
-	union dlb_aqed_pipe_fl_lim r3 = { {0} };
-	union dlb_aqed_pipe_fl_base r4 = { {0} };
-	union dlb_chp_ord_qid_sn_map r7 = { {0} };
-	union dlb_sys_ldb_qid_cfg_v r10 = { {0} };
-	union dlb_sys_ldb_qid_v r11 = { {0} };
-	union dlb_aqed_pipe_fl_push_ptr r5 = { {0} };
-	union dlb_aqed_pipe_fl_pop_ptr r6 = { {0} };
-	union dlb_aqed_pipe_qid_fid_lim r8 = { {0} };
-	union dlb_ro_pipe_qid2grpslt r9 = { {0} };
-	struct dlb_sn_group *sn_group;
-	unsigned int offs;
-
-	/* QID write permissions are turned on when the domain is started */
-	r0.field.vasqid_v = 0;
-
-	offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + queue->id;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
-
-	/*
-	 * Unordered QIDs get 4K inflights, ordered get as many as the number
-	 * of sequence numbers.
-	 */
-	r1.field.limit = args->num_qid_inflights;
-
-	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r1.val);
-
-	r2.field.limit = queue->aqed_freelist.bound -
-			 queue->aqed_freelist.base;
-
-	if (r2.field.limit > DLB_MAX_NUM_AQOS_ENTRIES)
-		r2.field.limit = DLB_MAX_NUM_AQOS_ENTRIES;
-
-	/* AQOS */
-	DLB_CSR_WR(hw, DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id), r2.val);
-
-	r3.field.freelist_disable = 0;
-	r3.field.limit = queue->aqed_freelist.bound - 1;
-
-	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_LIM(queue->id), r3.val);
-
-	r4.field.base = queue->aqed_freelist.base;
-
-	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_BASE(queue->id), r4.val);
-
-	r5.field.push_ptr = r4.field.base;
-	r5.field.generation = 1;
-
-	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_PUSH_PTR(queue->id), r5.val);
-
-	r6.field.pop_ptr = r4.field.base;
-	r6.field.generation = 0;
-
-	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_POP_PTR(queue->id), r6.val);
-
-	/* Configure SNs */
-	sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
-	r7.field.mode = sn_group->mode;
-	r7.field.slot = queue->sn_slot;
-	r7.field.grp  = sn_group->id;
-
-	DLB_CSR_WR(hw, DLB_CHP_ORD_QID_SN_MAP(queue->id), r7.val);
-
-	/*
-	 * This register limits the number of inflight flows a queue can have
-	 * at one time.  It has an upper bound of 2048, but can be
-	 * over-subscribed. 512 is chosen so that a single queue doesn't use
-	 * the entire atomic storage, but can use a substantial portion if
-	 * needed.
-	 */
-	r8.field.qid_fid_limit = 512;
-
-	DLB_CSR_WR(hw, DLB_AQED_PIPE_QID_FID_LIM(queue->id), r8.val);
-
-	r9.field.group = sn_group->id;
-	r9.field.slot = queue->sn_slot;
-
-	DLB_CSR_WR(hw, DLB_RO_PIPE_QID2GRPSLT(queue->id), r9.val);
-
-	r10.field.sn_cfg_v = (args->num_sequence_numbers != 0);
-	r10.field.fid_cfg_v = (args->num_atomic_inflights != 0);
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_QID_CFG_V(queue->id), r10.val);
-
-	r11.field.qid_v = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_QID_V(queue->id), r11.val);
-}
-
-int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id)
-{
-	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
-		return -EINVAL;
-
-	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
-}
-
-int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
-					    unsigned int group_id)
-{
-	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
-		return -EINVAL;
-
-	return dlb_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
-}
-
-static void dlb_log_set_group_sequence_numbers(struct dlb_hw *hw,
-					       unsigned int group_id,
-					       unsigned long val)
-{
-	DLB_HW_INFO(hw, "DLB set group sequence numbers:\n");
-	DLB_HW_INFO(hw, "\tGroup ID: %u\n", group_id);
-	DLB_HW_INFO(hw, "\tValue:    %lu\n", val);
-}
-
-int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
-				   unsigned int group_id,
-				   unsigned long val)
-{
-	u32 valid_allocations[6] = {32, 64, 128, 256, 512, 1024};
-	union dlb_ro_pipe_grp_sn_mode r0 = { {0} };
-	struct dlb_sn_group *group;
-	int mode;
-
-	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
-		return -EINVAL;
-
-	group = &hw->rsrcs.sn_groups[group_id];
-
-	/* Once the first load-balanced queue using an SN group is configured,
-	 * the group cannot be changed.
-	 */
-	if (group->slot_use_bitmap != 0)
-		return -EPERM;
-
-	for (mode = 0; mode < DLB_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
-		if (val == valid_allocations[mode])
-			break;
-
-	if (mode == DLB_MAX_NUM_SEQUENCE_NUMBER_MODES)
-		return -EINVAL;
-
-	group->mode = mode;
-	group->sequence_numbers_per_queue = val;
-
-	r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
-	r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
-	r0.field.sn_mode_2 = hw->rsrcs.sn_groups[2].mode;
-	r0.field.sn_mode_3 = hw->rsrcs.sn_groups[3].mode;
-
-	DLB_CSR_WR(hw, DLB_RO_PIPE_GRP_SN_MODE, r0.val);
-
-	dlb_log_set_group_sequence_numbers(hw, group_id, val);
-
-	return 0;
-}
-
-static int
-dlb_ldb_queue_attach_to_sn_group(struct dlb_hw *hw,
-				 struct dlb_ldb_queue *queue,
-				 struct dlb_create_ldb_queue_args *args)
-{
-	int slot = -1;
-	int i;
-
-	queue->sn_cfg_valid = false;
-
-	if (args->num_sequence_numbers == 0)
-		return 0;
-
-	for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
-		struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
-
-		if (group->sequence_numbers_per_queue ==
-		    args->num_sequence_numbers &&
-		    !dlb_sn_group_full(group)) {
-			slot = dlb_sn_group_alloc_slot(group);
-			if (slot >= 0)
-				break;
-		}
-	}
-
-	if (slot == -1) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no sequence number slots available\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	queue->sn_cfg_valid = true;
-	queue->sn_group = i;
-	queue->sn_slot = slot;
-	return 0;
-}
-
-static int
-dlb_ldb_queue_attach_resources(struct dlb_hw *hw,
-			       struct dlb_domain *domain,
-			       struct dlb_ldb_queue *queue,
-			       struct dlb_create_ldb_queue_args *args)
-{
-	int ret;
-
-	ret = dlb_ldb_queue_attach_to_sn_group(hw, queue, args);
-	if (ret)
-		return ret;
-
-	/* Attach QID inflights */
-	queue->num_qid_inflights = args->num_qid_inflights;
-
-	/* Attach atomic inflights */
-	queue->aqed_freelist.base = domain->aqed_freelist.base +
-				    domain->aqed_freelist.offset;
-	queue->aqed_freelist.bound = queue->aqed_freelist.base +
-				     args->num_atomic_inflights;
-	domain->aqed_freelist.offset += args->num_atomic_inflights;
-
-	return 0;
-}
-
-static int
-dlb_verify_create_ldb_queue_args(struct dlb_hw *hw,
-				 u32 domain_id,
-				 struct dlb_create_ldb_queue_args *args,
-				 struct dlb_cmd_response *resp)
-{
-	struct dlb_freelist *aqed_freelist;
-	struct dlb_domain *domain;
-	int i;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	if (domain->started) {
-		resp->status = DLB_ST_DOMAIN_STARTED;
-		return -1;
-	}
-
-	if (dlb_list_empty(&domain->avail_ldb_queues)) {
-		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
-		return -1;
-	}
-
-	if (args->num_sequence_numbers) {
-		for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
-			struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
-
-			if (group->sequence_numbers_per_queue ==
-			    args->num_sequence_numbers &&
-			    !dlb_sn_group_full(group))
-				break;
-		}
-
-		if (i == DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
-			resp->status = DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
-			return -1;
-		}
-	}
-
-	if (args->num_qid_inflights > 4096) {
-		resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
-		return -1;
-	}
-
-	/* Inflights must be <= number of sequence numbers if ordered */
-	if (args->num_sequence_numbers != 0 &&
-	    args->num_qid_inflights > args->num_sequence_numbers) {
-		resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
-		return -1;
-	}
-
-	aqed_freelist = &domain->aqed_freelist;
-
-	if (dlb_freelist_count(aqed_freelist) < args->num_atomic_inflights) {
-		resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
-		return -1;
-	}
-
-	return 0;
-}
-
-static void
-dlb_log_create_ldb_queue_args(struct dlb_hw *hw,
-			      u32 domain_id,
-			      struct dlb_create_ldb_queue_args *args)
-{
-	DLB_HW_INFO(hw, "DLB create load-balanced queue arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID:                  %d\n",
-		    domain_id);
-	DLB_HW_INFO(hw, "\tNumber of sequence numbers: %d\n",
-		    args->num_sequence_numbers);
-	DLB_HW_INFO(hw, "\tNumber of QID inflights:    %d\n",
-		    args->num_qid_inflights);
-	DLB_HW_INFO(hw, "\tNumber of ATM inflights:    %d\n",
-		    args->num_atomic_inflights);
-}
-
-/**
- * dlb_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.
- * @hw:	  Contains the current state of the DLB hardware.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
-			    u32 domain_id,
-			    struct dlb_create_ldb_queue_args *args,
-			    struct dlb_cmd_response *resp)
-{
-	struct dlb_ldb_queue *queue;
-	struct dlb_domain *domain;
-	int ret;
-
-	dlb_log_create_ldb_queue_args(hw, domain_id, args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	/* At least one available queue */
-	if (dlb_verify_create_ldb_queue_args(hw, domain_id, args, resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	queue = DLB_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
-
-	/* Verification should catch this. */
-	if (queue == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available ldb queues\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	ret = dlb_ldb_queue_attach_resources(hw, domain, queue, args);
-	if (ret < 0) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
-			   __func__, __LINE__);
-		return ret;
-	}
-
-	dlb_configure_ldb_queue(hw, domain, queue, args);
-
-	queue->num_mappings = 0;
-
-	queue->configured = true;
-
-	/* Configuration succeeded, so move the resource from the 'avail' to
-	 * the 'used' list.
-	 */
-	dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);
-
-	dlb_list_add(&domain->used_ldb_queues, &queue->domain_list);
-
-	resp->status = 0;
-	resp->id = queue->id;
-
-	return 0;
-}
-
-
-static void
-dlb_log_create_dir_queue_args(struct dlb_hw *hw,
-			      u32 domain_id,
-			      struct dlb_create_dir_queue_args *args)
-{
-	DLB_HW_INFO(hw, "DLB create directed queue arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
-	DLB_HW_INFO(hw, "\tPort ID:   %d\n", args->port_id);
-}
-
-static struct dlb_dir_pq_pair *
-dlb_get_domain_used_dir_pq(u32 id, struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	struct dlb_dir_pq_pair *port;
-	RTE_SET_USED(iter);
-
-	if (id >= DLB_MAX_NUM_DIR_PORTS)
-		return NULL;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
-		if (port->id == id)
-			return port;
-
-	return NULL;
-}
-
-static int
-dlb_verify_create_dir_queue_args(struct dlb_hw *hw,
-				 u32 domain_id,
-				 struct dlb_create_dir_queue_args *args,
-				 struct dlb_cmd_response *resp)
-{
-	struct dlb_domain *domain;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	if (domain->started) {
-		resp->status = DLB_ST_DOMAIN_STARTED;
-		return -1;
-	}
-
-	/* If the user claims the port is already configured, validate the port
-	 * ID, its domain, and whether the port is configured.
-	 */
-	if (args->port_id != -1) {
-		struct dlb_dir_pq_pair *port;
-
-		port = dlb_get_domain_used_dir_pq(args->port_id, domain);
-
-		if (port  == NULL || port->domain_id != domain->id ||
-		    !port->port_configured) {
-			resp->status = DLB_ST_INVALID_PORT_ID;
-			return -1;
-		}
-	}
-
-	/* If the queue's port is not configured, validate that a free
-	 * port-queue pair is available.
-	 */
-	if (args->port_id == -1 &&
-	    dlb_list_empty(&domain->avail_dir_pq_pairs)) {
-		resp->status = DLB_ST_DIR_QUEUES_UNAVAILABLE;
-		return -1;
-	}
-
-	return 0;
-}
-
-static void dlb_configure_dir_queue(struct dlb_hw *hw,
-				    struct dlb_domain *domain,
-				    struct dlb_dir_pq_pair *queue)
-{
-	union dlb_sys_dir_vasqid_v r0 = { {0} };
-	union dlb_sys_dir_qid_v r1 = { {0} };
-	unsigned int offs;
-
-	/* QID write permissions are turned on when the domain is started */
-	r0.field.vasqid_v = 0;
-
-	offs = (domain->id * DLB_MAX_NUM_DIR_PORTS) + queue->id;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
-
-	r1.field.qid_v = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_QID_V(queue->id), r1.val);
-
-	queue->queue_configured = true;
-}
-
-/**
- * dlb_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
- * @hw:	  Contains the current state of the DLB hardware.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_create_dir_queue(struct dlb_hw *hw,
-			    u32 domain_id,
-			    struct dlb_create_dir_queue_args *args,
-			    struct dlb_cmd_response *resp)
-{
-	struct dlb_dir_pq_pair *queue;
-	struct dlb_domain *domain;
-
-	dlb_log_create_dir_queue_args(hw, domain_id, args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	if (dlb_verify_create_dir_queue_args(hw, domain_id, args, resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (args->port_id != -1)
-		queue = dlb_get_domain_used_dir_pq(args->port_id, domain);
-	else
-		queue = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
-					  typeof(*queue));
-
-	/* Verification should catch this. */
-	if (queue == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available dir queues\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	dlb_configure_dir_queue(hw, domain, queue);
-
-	/* Configuration succeeded, so move the resource from the 'avail' to
-	 * the 'used' list (if it's not already there).
-	 */
-	if (args->port_id == -1) {
-		dlb_list_del(&domain->avail_dir_pq_pairs, &queue->domain_list);
-
-		dlb_list_add(&domain->used_dir_pq_pairs, &queue->domain_list);
-	}
-
-	resp->status = 0;
-
-	resp->id = queue->id;
-
-	return 0;
-}
-
-static void dlb_log_create_ldb_port_args(struct dlb_hw *hw,
-					 u32 domain_id,
-					 u64 pop_count_dma_base,
-					 u64 cq_dma_base,
-					 struct dlb_create_ldb_port_args *args)
-{
-	DLB_HW_INFO(hw, "DLB create load-balanced port arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID:                 %d\n",
-		    domain_id);
-	DLB_HW_INFO(hw, "\tLDB credit pool ID:        %d\n",
-		    args->ldb_credit_pool_id);
-	DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
-		    args->ldb_credit_high_watermark);
-	DLB_HW_INFO(hw, "\tLDB credit low watermark:  %d\n",
-		    args->ldb_credit_low_watermark);
-	DLB_HW_INFO(hw, "\tLDB credit quantum:        %d\n",
-		    args->ldb_credit_quantum);
-	DLB_HW_INFO(hw, "\tDIR credit pool ID:        %d\n",
-		    args->dir_credit_pool_id);
-	DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
-		    args->dir_credit_high_watermark);
-	DLB_HW_INFO(hw, "\tDIR credit low watermark:  %d\n",
-		    args->dir_credit_low_watermark);
-	DLB_HW_INFO(hw, "\tDIR credit quantum:        %d\n",
-		    args->dir_credit_quantum);
-	DLB_HW_INFO(hw, "\tpop_count_address:         0x%"PRIx64"\n",
-		    pop_count_dma_base);
-	DLB_HW_INFO(hw, "\tCQ depth:                  %d\n",
-		    args->cq_depth);
-	DLB_HW_INFO(hw, "\tCQ hist list size:         %d\n",
-		    args->cq_history_list_size);
-	DLB_HW_INFO(hw, "\tCQ base address:           0x%"PRIx64"\n",
-		    cq_dma_base);
-}
-
-static struct dlb_credit_pool *
-dlb_get_domain_ldb_pool(u32 id, struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	struct dlb_credit_pool *pool;
-	RTE_SET_USED(iter);
-
-	if (id >= DLB_MAX_NUM_LDB_CREDIT_POOLS)
-		return NULL;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
-		if (pool->id == id)
-			return pool;
-
-	return NULL;
-}
-
-static struct dlb_credit_pool *
-dlb_get_domain_dir_pool(u32 id, struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	struct dlb_credit_pool *pool;
-	RTE_SET_USED(iter);
-
-	if (id >= DLB_MAX_NUM_DIR_CREDIT_POOLS)
-		return NULL;
-
-	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
-		if (pool->id == id)
-			return pool;
-
-	return NULL;
-}
-
-static int
-dlb_verify_create_ldb_port_args(struct dlb_hw *hw,
-				u32 domain_id,
-				u64 pop_count_dma_base,
-				u64 cq_dma_base,
-				struct dlb_create_ldb_port_args *args,
-				struct dlb_cmd_response *resp)
-{
-	struct dlb_domain *domain;
-	struct dlb_credit_pool *pool;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	if (domain->started) {
-		resp->status = DLB_ST_DOMAIN_STARTED;
-		return -1;
-	}
-
-	if (dlb_list_empty(&domain->avail_ldb_ports)) {
-		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
-		return -1;
-	}
-
-	/* If the scheduling domain has no LDB queues, we configure the
-	 * hardware to not supply the port with any LDB credits. In that
-	 * case, ignore the LDB credit arguments.
-	 */
-	if (!dlb_list_empty(&domain->used_ldb_queues) ||
-	    !dlb_list_empty(&domain->avail_ldb_queues)) {
-		pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
-					       domain);
-
-		if (pool  == NULL || !pool->configured ||
-		    pool->domain_id != domain->id) {
-			resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
-			return -1;
-		}
-
-		if (args->ldb_credit_high_watermark > pool->avail_credits) {
-			resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
-			return -1;
-		}
-
-		if (args->ldb_credit_low_watermark >=
-		    args->ldb_credit_high_watermark) {
-			resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
-			return -1;
-		}
-
-		if (args->ldb_credit_quantum >=
-		    args->ldb_credit_high_watermark) {
-			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
-			return -1;
-		}
-
-		if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
-			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
-			return -1;
-		}
-	}
-
-	/* Likewise, if the scheduling domain has no DIR queues, we configure
-	 * the hardware to not supply the port with any DIR credits. In that
-	 * case, ignore the DIR credit arguments.
-	 */
-	if (!dlb_list_empty(&domain->used_dir_pq_pairs) ||
-	    !dlb_list_empty(&domain->avail_dir_pq_pairs)) {
-		pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
-					       domain);
-
-		if (pool  == NULL || !pool->configured ||
-		    pool->domain_id != domain->id) {
-			resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
-			return -1;
-		}
-
-		if (args->dir_credit_high_watermark > pool->avail_credits) {
-			resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
-			return -1;
-		}
-
-		if (args->dir_credit_low_watermark >=
-		    args->dir_credit_high_watermark) {
-			resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
-			return -1;
-		}
-
-		if (args->dir_credit_quantum >=
-		    args->dir_credit_high_watermark) {
-			resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
-			return -1;
-		}
-
-		if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
-			resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
-			return -1;
-		}
-	}
-
-	/* Check cache-line alignment */
-	if ((pop_count_dma_base & 0x3F) != 0) {
-		resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
-		return -1;
-	}
-
-	if ((cq_dma_base & 0x3F) != 0) {
-		resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
-		return -1;
-	}
-
-	if (args->cq_depth != 1 &&
-	    args->cq_depth != 2 &&
-	    args->cq_depth != 4 &&
-	    args->cq_depth != 8 &&
-	    args->cq_depth != 16 &&
-	    args->cq_depth != 32 &&
-	    args->cq_depth != 64 &&
-	    args->cq_depth != 128 &&
-	    args->cq_depth != 256 &&
-	    args->cq_depth != 512 &&
-	    args->cq_depth != 1024) {
-		resp->status = DLB_ST_INVALID_CQ_DEPTH;
-		return -1;
-	}
-
-	/* The history list size must be >= 1 */
-	if (!args->cq_history_list_size) {
-		resp->status = DLB_ST_INVALID_HIST_LIST_DEPTH;
-		return -1;
-	}
-
-	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
-		resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
-		return -1;
-	}
-
-	return 0;
-}
-
-static void dlb_ldb_pool_update_credit_count(struct dlb_hw *hw,
-					     u32 pool_id,
-					     u32 count)
-{
-	hw->rsrcs.ldb_credit_pools[pool_id].avail_credits -= count;
-}
-
-static void dlb_dir_pool_update_credit_count(struct dlb_hw *hw,
-					     u32 pool_id,
-					     u32 count)
-{
-	hw->rsrcs.dir_credit_pools[pool_id].avail_credits -= count;
-}
-
-static int dlb_ldb_port_configure_pp(struct dlb_hw *hw,
-				     struct dlb_domain *domain,
-				     struct dlb_ldb_port *port,
-				     struct dlb_create_ldb_port_args *args)
-{
-	union dlb_sys_ldb_pp2ldbpool r0 = { {0} };
-	union dlb_sys_ldb_pp2dirpool r1 = { {0} };
-	union dlb_sys_ldb_pp2vf_pf r2 = { {0} };
-	union dlb_sys_ldb_pp2vas r3 = { {0} };
-	union dlb_sys_ldb_pp_v r4 = { {0} };
-	union dlb_chp_ldb_pp_ldb_crd_hwm r6 = { {0} };
-	union dlb_chp_ldb_pp_dir_crd_hwm r7 = { {0} };
-	union dlb_chp_ldb_pp_ldb_crd_lwm r8 = { {0} };
-	union dlb_chp_ldb_pp_dir_crd_lwm r9 = { {0} };
-	union dlb_chp_ldb_pp_ldb_min_crd_qnt r10 = { {0} };
-	union dlb_chp_ldb_pp_dir_min_crd_qnt r11 = { {0} };
-	union dlb_chp_ldb_pp_ldb_crd_cnt r12 = { {0} };
-	union dlb_chp_ldb_pp_dir_crd_cnt r13 = { {0} };
-	union dlb_chp_ldb_ldb_pp2pool r14 = { {0} };
-	union dlb_chp_ldb_dir_pp2pool r15 = { {0} };
-	union dlb_chp_ldb_pp_crd_req_state r16 = { {0} };
-	union dlb_chp_ldb_pp_ldb_push_ptr r17 = { {0} };
-	union dlb_chp_ldb_pp_dir_push_ptr r18 = { {0} };
-
-	struct dlb_credit_pool *ldb_pool = NULL;
-	struct dlb_credit_pool *dir_pool = NULL;
-
-	if (port->ldb_pool_used) {
-		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
-						   domain);
-		if (ldb_pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: port validation failed\n",
-				   __func__);
-			return -EFAULT;
-		}
-	}
-
-	if (port->dir_pool_used) {
-		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
-						   domain);
-		if (dir_pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: port validation failed\n",
-				   __func__);
-			return -EFAULT;
-		}
-	}
-
-	r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2LDBPOOL(port->id), r0.val);
-
-	r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2DIRPOOL(port->id), r1.val);
-
-	r2.field.is_pf = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VF_PF(port->id), r2.val);
-
-	r3.field.vas = domain->id;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VAS(port->id), r3.val);
-
-	r6.field.hwm = args->ldb_credit_high_watermark;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id), r6.val);
-
-	r7.field.hwm = args->dir_credit_high_watermark;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id), r7.val);
-
-	r8.field.lwm = args->ldb_credit_low_watermark;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id), r8.val);
-
-	r9.field.lwm = args->dir_credit_low_watermark;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id), r9.val);
-
-	r10.field.quanta = args->ldb_credit_quantum;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
-		   r10.val);
-
-	r11.field.quanta = args->dir_credit_quantum;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
-		   r11.val);
-
-	r12.field.count = args->ldb_credit_high_watermark;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_CNT(port->id), r12.val);
-
-	r13.field.count = args->dir_credit_high_watermark;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_CNT(port->id), r13.val);
-
-	r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_LDB_PP2POOL(port->id), r14.val);
-
-	r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_DIR_PP2POOL(port->id), r15.val);
-
-	r16.field.no_pp_credit_update = 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id), r16.val);
-
-	r17.field.push_pointer = 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id), r17.val);
-
-	r18.field.push_pointer = 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id), r18.val);
-
-	r4.field.pp_v = 1;
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_PP_V(port->id),
-		   r4.val);
-
-	return 0;
-}
-
-static int dlb_ldb_port_configure_cq(struct dlb_hw *hw,
-				     struct dlb_ldb_port *port,
-				     u64 pop_count_dma_base,
-				     u64 cq_dma_base,
-				     struct dlb_create_ldb_port_args *args)
-{
-	int i;
-
-	union dlb_sys_ldb_cq_addr_l r0 = { {0} };
-	union dlb_sys_ldb_cq_addr_u r1 = { {0} };
-	union dlb_sys_ldb_cq2vf_pf r2 = { {0} };
-	union dlb_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
-	union dlb_chp_hist_list_lim r4 = { {0} };
-	union dlb_chp_hist_list_base r5 = { {0} };
-	union dlb_lsp_cq_ldb_infl_lim r6 = { {0} };
-	union dlb_lsp_cq2priov r7 = { {0} };
-	union dlb_chp_hist_list_push_ptr r8 = { {0} };
-	union dlb_chp_hist_list_pop_ptr r9 = { {0} };
-	union dlb_lsp_cq_ldb_tkn_depth_sel r10 = { {0} };
-	union dlb_sys_ldb_pp_addr_l r11 = { {0} };
-	union dlb_sys_ldb_pp_addr_u r12 = { {0} };
-
-	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
-	r0.field.addr_l = cq_dma_base >> 6;
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_CQ_ADDR_L(port->id),
-		   r0.val);
-
-	r1.field.addr_u = cq_dma_base >> 32;
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_CQ_ADDR_U(port->id),
-		   r1.val);
-
-	r2.field.is_pf = 1;
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_LDB_CQ2VF_PF(port->id),
-		   r2.val);
-
-	if (args->cq_depth <= 8) {
-		r3.field.token_depth_select = 1;
-	} else if (args->cq_depth == 16) {
-		r3.field.token_depth_select = 2;
-	} else if (args->cq_depth == 32) {
-		r3.field.token_depth_select = 3;
-	} else if (args->cq_depth == 64) {
-		r3.field.token_depth_select = 4;
-	} else if (args->cq_depth == 128) {
-		r3.field.token_depth_select = 5;
-	} else if (args->cq_depth == 256) {
-		r3.field.token_depth_select = 6;
-	} else if (args->cq_depth == 512) {
-		r3.field.token_depth_select = 7;
-	} else if (args->cq_depth == 1024) {
-		r3.field.token_depth_select = 8;
-	} else {
-		DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
-		   r3.val);
-
-	r10.field.token_depth_select = r3.field.token_depth_select;
-	r10.field.ignore_depth = 0;
-	/* TDT algorithm: DLB must be able to write CQs with depth < 4 */
-	r10.field.enab_shallow_cq = 1;
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
-		   r10.val);
-
-	/* To support CQs with depth less than 8, program the token count
-	 * register with a non-zero initial value. Operations such as domain
-	 * reset must take this initial value into account when quiescing the
-	 * CQ.
-	 */
-	port->init_tkn_cnt = 0;
-
-	if (args->cq_depth < 8) {
-		union dlb_lsp_cq_ldb_tkn_cnt r12 = { {0} };
-
-		port->init_tkn_cnt = 8 - args->cq_depth;
-
-		r12.field.token_count = port->init_tkn_cnt;
-
-		DLB_CSR_WR(hw,
-			   DLB_LSP_CQ_LDB_TKN_CNT(port->id),
-			   r12.val);
-	}
-
-	r4.field.limit = port->hist_list_entry_limit - 1;
-
-	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_LIM(port->id), r4.val);
-
-	r5.field.base = port->hist_list_entry_base;
-
-	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_BASE(port->id), r5.val);
-
-	r8.field.push_ptr = r5.field.base;
-	r8.field.generation = 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_PUSH_PTR(port->id), r8.val);
-
-	r9.field.pop_ptr = r5.field.base;
-	r9.field.generation = 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_POP_PTR(port->id), r9.val);
-
-	/* The inflight limit sets a cap on the number of QEs for which this CQ
-	 * can owe completions at one time.
-	 */
-	r6.field.limit = args->cq_history_list_size;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_INFL_LIM(port->id), r6.val);
-
-	/* Disable the port's QID mappings */
-	r7.field.v = 0;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r7.val);
-
-	/* Two cache lines (128B) are dedicated for the port's pop counts */
-	r11.field.addr_l = pop_count_dma_base >> 7;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_L(port->id), r11.val);
-
-	r12.field.addr_u = pop_count_dma_base >> 32;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_U(port->id), r12.val);
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
-		port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
-
-	return 0;
-}
-
-static void dlb_update_ldb_arb_threshold(struct dlb_hw *hw)
-{
-	union dlb_lsp_ctrl_config_0 r0 = { {0} };
-
-	/* From the hardware spec:
-	 * "The optimal value for ldb_arb_threshold is in the region of {8 *
-	 * #CQs}. It is expected therefore that the PF will change this value
-	 * dynamically as the number of active ports changes."
-	 */
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_CTRL_CONFIG_0);
-
-	r0.field.ldb_arb_threshold = hw->pf.num_enabled_ldb_ports * 8;
-	r0.field.ldb_arb_ignore_empty = 1;
-	r0.field.ldb_arb_mode = 1;
-
-	DLB_CSR_WR(hw, DLB_LSP_CTRL_CONFIG_0, r0.val);
-
-	dlb_flush_csr(hw);
-}
-
-static int dlb_configure_ldb_port(struct dlb_hw *hw,
-				  struct dlb_domain *domain,
-				  struct dlb_ldb_port *port,
-				  u64 pop_count_dma_base,
-				  u64 cq_dma_base,
-				  struct dlb_create_ldb_port_args *args)
-{
-	struct dlb_credit_pool *ldb_pool, *dir_pool;
-	int ret;
-
-	port->hist_list_entry_base = domain->hist_list_entry_base +
-				     domain->hist_list_entry_offset;
-	port->hist_list_entry_limit = port->hist_list_entry_base +
-				      args->cq_history_list_size;
-
-	domain->hist_list_entry_offset += args->cq_history_list_size;
-	domain->avail_hist_list_entries -= args->cq_history_list_size;
-
-	port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
-			      !dlb_list_empty(&domain->avail_ldb_queues);
-	port->dir_pool_used = !dlb_list_empty(&domain->used_dir_pq_pairs) ||
-			      !dlb_list_empty(&domain->avail_dir_pq_pairs);
-
-	if (port->ldb_pool_used) {
-		u32 cnt = args->ldb_credit_high_watermark;
-
-		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
-						   domain);
-		if (ldb_pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: port validation failed\n",
-				   __func__);
-			return -EFAULT;
-		}
-
-		dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
-	} else {
-		args->ldb_credit_high_watermark = 0;
-		args->ldb_credit_low_watermark = 0;
-		args->ldb_credit_quantum = 0;
-	}
-
-	if (port->dir_pool_used) {
-		u32 cnt = args->dir_credit_high_watermark;
-
-		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
-						   domain);
-		if (dir_pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: port validation failed\n",
-				   __func__);
-			return -EFAULT;
-		}
-
-		dlb_dir_pool_update_credit_count(hw, dir_pool->id, cnt);
-	} else {
-		args->dir_credit_high_watermark = 0;
-		args->dir_credit_low_watermark = 0;
-		args->dir_credit_quantum = 0;
-	}
-
-	ret = dlb_ldb_port_configure_cq(hw,
-					port,
-					pop_count_dma_base,
-					cq_dma_base,
-					args);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_ldb_port_configure_pp(hw, domain, port, args);
-	if (ret < 0)
-		return ret;
-
-	dlb_ldb_port_cq_enable(hw, port);
-
-	port->num_mappings = 0;
-
-	port->enabled = true;
-
-	hw->pf.num_enabled_ldb_ports++;
-
-	dlb_update_ldb_arb_threshold(hw);
-
-	port->configured = true;
-
-	return 0;
-}
-
-/**
- * dlb_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
- *	its resources.
- * @hw:	  Contains the current state of the DLB hardware.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_create_ldb_port(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_ldb_port_args *args,
-			   u64 pop_count_dma_base,
-			   u64 cq_dma_base,
-			   struct dlb_cmd_response *resp)
-{
-	struct dlb_ldb_port *port;
-	struct dlb_domain *domain;
-	int ret;
-
-	dlb_log_create_ldb_port_args(hw,
-				     domain_id,
-				     pop_count_dma_base,
-				     cq_dma_base,
-				     args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	if (dlb_verify_create_ldb_port_args(hw,
-					    domain_id,
-					    pop_count_dma_base,
-					    cq_dma_base,
-					    args,
-					    resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	port = DLB_DOM_LIST_HEAD(domain->avail_ldb_ports, typeof(*port));
-
-	/* Verification should catch this. */
-	if (port == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available ldb ports\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (port->configured) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	ret = dlb_configure_ldb_port(hw,
-				     domain,
-				     port,
-				     pop_count_dma_base,
-				     cq_dma_base,
-				     args);
-	if (ret < 0)
-		return ret;
-
-	/* Configuration succeeded, so move the resource from the 'avail' to
-	 * the 'used' list.
-	 */
-	dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);
-
-	dlb_list_add(&domain->used_ldb_ports, &port->domain_list);
-
-	resp->status = 0;
-	resp->id = port->id;
-
-	return 0;
-}
-
-static void dlb_log_create_dir_port_args(struct dlb_hw *hw,
-					 u32 domain_id,
-					 u64 pop_count_dma_base,
-					 u64 cq_dma_base,
-					 struct dlb_create_dir_port_args *args)
-{
-	DLB_HW_INFO(hw, "DLB create directed port arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID:                 %d\n",
-		    domain_id);
-	DLB_HW_INFO(hw, "\tLDB credit pool ID:        %d\n",
-		    args->ldb_credit_pool_id);
-	DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
-		    args->ldb_credit_high_watermark);
-	DLB_HW_INFO(hw, "\tLDB credit low watermark:  %d\n",
-		    args->ldb_credit_low_watermark);
-	DLB_HW_INFO(hw, "\tLDB credit quantum:        %d\n",
-		    args->ldb_credit_quantum);
-	DLB_HW_INFO(hw, "\tDIR credit pool ID:        %d\n",
-		    args->dir_credit_pool_id);
-	DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
-		    args->dir_credit_high_watermark);
-	DLB_HW_INFO(hw, "\tDIR credit low watermark:  %d\n",
-		    args->dir_credit_low_watermark);
-	DLB_HW_INFO(hw, "\tDIR credit quantum:        %d\n",
-		    args->dir_credit_quantum);
-	DLB_HW_INFO(hw, "\tpop_count_address:         0x%"PRIx64"\n",
-		    pop_count_dma_base);
-	DLB_HW_INFO(hw, "\tCQ depth:                  %d\n",
-		    args->cq_depth);
-	DLB_HW_INFO(hw, "\tCQ base address:           0x%"PRIx64"\n",
-		    cq_dma_base);
-}
-
-static int
-dlb_verify_create_dir_port_args(struct dlb_hw *hw,
-				u32 domain_id,
-				u64 pop_count_dma_base,
-				u64 cq_dma_base,
-				struct dlb_create_dir_port_args *args,
-				struct dlb_cmd_response *resp)
-{
-	struct dlb_domain *domain;
-	struct dlb_credit_pool *pool;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	if (domain->started) {
-		resp->status = DLB_ST_DOMAIN_STARTED;
-		return -1;
-	}
-
-	/* If the user claims the queue is already configured, validate
-	 * the queue ID, its domain, and whether the queue is configured.
-	 */
-	if (args->queue_id != -1) {
-		struct dlb_dir_pq_pair *queue;
-
-		queue = dlb_get_domain_used_dir_pq(args->queue_id,
-						   domain);
-
-		if (queue  == NULL || queue->domain_id != domain->id ||
-		    !queue->queue_configured) {
-			resp->status = DLB_ST_INVALID_DIR_QUEUE_ID;
-			return -1;
-		}
-	}
-
-	/* If the port's queue is not configured, validate that a free
-	 * port-queue pair is available.
-	 */
-	if (args->queue_id == -1 &&
-	    dlb_list_empty(&domain->avail_dir_pq_pairs)) {
-		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
-		return -1;
-	}
-
-	/* If the scheduling domain has no LDB queues, we configure the
-	 * hardware to not supply the port with any LDB credits. In that
-	 * case, ignore the LDB credit arguments.
-	 */
-	if (!dlb_list_empty(&domain->used_ldb_queues) ||
-	    !dlb_list_empty(&domain->avail_ldb_queues)) {
-		pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
-					       domain);
-
-		if (pool  == NULL || !pool->configured ||
-		    pool->domain_id != domain->id) {
-			resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
-			return -1;
-		}
-
-		if (args->ldb_credit_high_watermark > pool->avail_credits) {
-			resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
-			return -1;
-		}
-
-		if (args->ldb_credit_low_watermark >=
-		    args->ldb_credit_high_watermark) {
-			resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
-			return -1;
-		}
-
-		if (args->ldb_credit_quantum >=
-		    args->ldb_credit_high_watermark) {
-			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
-			return -1;
-		}
-
-		if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
-			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
-			return -1;
-		}
-	}
-
-	pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
-				       domain);
-
-	if (pool  == NULL || !pool->configured ||
-	    pool->domain_id != domain->id) {
-		resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
-		return -1;
-	}
-
-	if (args->dir_credit_high_watermark > pool->avail_credits) {
-		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (args->dir_credit_low_watermark >= args->dir_credit_high_watermark) {
-		resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
-		return -1;
-	}
-
-	if (args->dir_credit_quantum >= args->dir_credit_high_watermark) {
-		resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
-		return -1;
-	}
-
-	if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
-		resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
-		return -1;
-	}
-
-	/* Check cache-line alignment */
-	if ((pop_count_dma_base & 0x3F) != 0) {
-		resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
-		return -1;
-	}
-
-	if ((cq_dma_base & 0x3F) != 0) {
-		resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
-		return -1;
-	}
-
-	if (args->cq_depth != 8 &&
-	    args->cq_depth != 16 &&
-	    args->cq_depth != 32 &&
-	    args->cq_depth != 64 &&
-	    args->cq_depth != 128 &&
-	    args->cq_depth != 256 &&
-	    args->cq_depth != 512 &&
-	    args->cq_depth != 1024) {
-		resp->status = DLB_ST_INVALID_CQ_DEPTH;
-		return -1;
-	}
-
-	return 0;
-}
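
The checks above constrain the two DMA bases and the CQ depth: both bases must
be 64B aligned and the depth must be a power of two between 8 and 1024. A
minimal standalone sketch of an equivalent depth check (the helper name is
illustrative, not part of the driver):

#include <stdbool.h>
#include <stdint.h>

/* True if depth is a power of two in [8, 1024], matching the explicit
 * list tested in dlb_verify_create_dir_port_args().
 */
static bool dlb_example_cq_depth_valid(uint32_t depth)
{
	return depth >= 8 && depth <= 1024 &&
	       (depth & (depth - 1)) == 0;
}
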
-
-static int dlb_dir_port_configure_pp(struct dlb_hw *hw,
-				     struct dlb_domain *domain,
-				     struct dlb_dir_pq_pair *port,
-				     struct dlb_create_dir_port_args *args)
-{
-	union dlb_sys_dir_pp2ldbpool r0 = { {0} };
-	union dlb_sys_dir_pp2dirpool r1 = { {0} };
-	union dlb_sys_dir_pp2vf_pf r2 = { {0} };
-	union dlb_sys_dir_pp2vas r3 = { {0} };
-	union dlb_sys_dir_pp_v r4 = { {0} };
-	union dlb_chp_dir_pp_ldb_crd_hwm r6 = { {0} };
-	union dlb_chp_dir_pp_dir_crd_hwm r7 = { {0} };
-	union dlb_chp_dir_pp_ldb_crd_lwm r8 = { {0} };
-	union dlb_chp_dir_pp_dir_crd_lwm r9 = { {0} };
-	union dlb_chp_dir_pp_ldb_min_crd_qnt r10 = { {0} };
-	union dlb_chp_dir_pp_dir_min_crd_qnt r11 = { {0} };
-	union dlb_chp_dir_pp_ldb_crd_cnt r12 = { {0} };
-	union dlb_chp_dir_pp_dir_crd_cnt r13 = { {0} };
-	union dlb_chp_dir_ldb_pp2pool r14 = { {0} };
-	union dlb_chp_dir_dir_pp2pool r15 = { {0} };
-	union dlb_chp_dir_pp_crd_req_state r16 = { {0} };
-	union dlb_chp_dir_pp_ldb_push_ptr r17 = { {0} };
-	union dlb_chp_dir_pp_dir_push_ptr r18 = { {0} };
-
-	struct dlb_credit_pool *ldb_pool = NULL;
-	struct dlb_credit_pool *dir_pool = NULL;
-
-	if (port->ldb_pool_used) {
-		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
-						   domain);
-		if (ldb_pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: port validation failed\n",
-				   __func__);
-			return -EFAULT;
-		}
-	}
-
-	if (port->dir_pool_used) {
-		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
-						   domain);
-		if (dir_pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: port validation failed\n",
-				   __func__);
-			return -EFAULT;
-		}
-	}
-
-	r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP2LDBPOOL(port->id),
-		   r0.val);
-
-	r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP2DIRPOOL(port->id),
-		   r1.val);
-
-	r2.field.is_pf = 1;
-	r2.field.is_hw_dsi = 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP2VF_PF(port->id),
-		   r2.val);
-
-	r3.field.vas = domain->id;
-
-	DLB_CSR_WR(hw,
-		   DLB_SYS_DIR_PP2VAS(port->id),
-		   r3.val);
-
-	r6.field.hwm = args->ldb_credit_high_watermark;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
-		   r6.val);
-
-	r7.field.hwm = args->dir_credit_high_watermark;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
-		   r7.val);
-
-	r8.field.lwm = args->ldb_credit_low_watermark;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
-		   r8.val);
-
-	r9.field.lwm = args->dir_credit_low_watermark;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
-		   r9.val);
-
-	r10.field.quanta = args->ldb_credit_quantum;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
-		   r10.val);
-
-	r11.field.quanta = args->dir_credit_quantum;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
-		   r11.val);
-
-	r12.field.count = args->ldb_credit_high_watermark;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_CRD_CNT(port->id),
-		   r12.val);
-
-	r13.field.count = args->dir_credit_high_watermark;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_CRD_CNT(port->id),
-		   r13.val);
-
-	r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_LDB_PP2POOL(port->id),
-		   r14.val);
-
-	r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_DIR_PP2POOL(port->id),
-		   r15.val);
-
-	r16.field.no_pp_credit_update = 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
-		   r16.val);
-
-	r17.field.push_pointer = 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
-		   r17.val);
-
-	r18.field.push_pointer = 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
-		   r18.val);
-
-	r4.field.pp_v = 1;
-	r4.field.mb_dm = 0;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_V(port->id), r4.val);
-
-	return 0;
-}
-
-static int dlb_dir_port_configure_cq(struct dlb_hw *hw,
-				     struct dlb_dir_pq_pair *port,
-				     u64 pop_count_dma_base,
-				     u64 cq_dma_base,
-				     struct dlb_create_dir_port_args *args)
-{
-	union dlb_sys_dir_cq_addr_l r0 = { {0} };
-	union dlb_sys_dir_cq_addr_u r1 = { {0} };
-	union dlb_sys_dir_cq2vf_pf r2 = { {0} };
-	union dlb_chp_dir_cq_tkn_depth_sel r3 = { {0} };
-	union dlb_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
-	union dlb_sys_dir_pp_addr_l r5 = { {0} };
-	union dlb_sys_dir_pp_addr_u r6 = { {0} };
-
-	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
-	r0.field.addr_l = cq_dma_base >> 6;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_L(port->id), r0.val);
-
-	r1.field.addr_u = cq_dma_base >> 32;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_U(port->id), r1.val);
-
-	r2.field.is_pf = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ2VF_PF(port->id), r2.val);
-
-	if (args->cq_depth == 8) {
-		r3.field.token_depth_select = 1;
-	} else if (args->cq_depth == 16) {
-		r3.field.token_depth_select = 2;
-	} else if (args->cq_depth == 32) {
-		r3.field.token_depth_select = 3;
-	} else if (args->cq_depth == 64) {
-		r3.field.token_depth_select = 4;
-	} else if (args->cq_depth == 128) {
-		r3.field.token_depth_select = 5;
-	} else if (args->cq_depth == 256) {
-		r3.field.token_depth_select = 6;
-	} else if (args->cq_depth == 512) {
-		r3.field.token_depth_select = 7;
-	} else if (args->cq_depth == 1024) {
-		r3.field.token_depth_select = 8;
-	} else {
-		DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
-		   r3.val);
-
-	r4.field.token_depth_select = r3.field.token_depth_select;
-	r4.field.disable_wb_opt = 0;
-
-	DLB_CSR_WR(hw,
-		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
-		   r4.val);
-
-	/* Two cache lines (128B) are dedicated for the port's pop counts */
-	r5.field.addr_l = pop_count_dma_base >> 7;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_L(port->id), r5.val);
-
-	r6.field.addr_u = pop_count_dma_base >> 32;
-
-	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_U(port->id), r6.val);
-
-	return 0;
-}
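
The if/else ladder above encodes token_depth_select as log2(cq_depth) - 2
(8 -> 1, 16 -> 2, ..., 1024 -> 8). A compact equivalent, shown only as an
illustrative sketch and assuming cq_depth was already validated as a power of
two in [8, 1024]:

#include <stdint.h>

static uint32_t dlb_example_token_depth_select(uint32_t cq_depth)
{
	uint32_t sel = 0;

	/* Compute log2(cq_depth); cq_depth is a power of two. */
	while (cq_depth > 1) {
		cq_depth >>= 1;
		sel++;
	}

	return sel - 2; /* 8 -> 1, 16 -> 2, ..., 1024 -> 8 */
}
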
-
-static int dlb_configure_dir_port(struct dlb_hw *hw,
-				  struct dlb_domain *domain,
-				  struct dlb_dir_pq_pair *port,
-				  u64 pop_count_dma_base,
-				  u64 cq_dma_base,
-				  struct dlb_create_dir_port_args *args)
-{
-	struct dlb_credit_pool *ldb_pool, *dir_pool;
-	int ret;
-
-	port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
-			      !dlb_list_empty(&domain->avail_ldb_queues);
-
-	/* Each directed port has a directed queue, hence this port requires
-	 * directed credits.
-	 */
-	port->dir_pool_used = true;
-
-	if (port->ldb_pool_used) {
-		u32 cnt = args->ldb_credit_high_watermark;
-
-		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
-						   domain);
-		if (ldb_pool == NULL) {
-			DLB_HW_ERR(hw,
-				   "[%s()] Internal error: port validation failed\n",
-				   __func__);
-			return -EFAULT;
-		}
-
-		dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
-	} else {
-		args->ldb_credit_high_watermark = 0;
-		args->ldb_credit_low_watermark = 0;
-		args->ldb_credit_quantum = 0;
-	}
-
-	dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, domain);
-	if (dir_pool == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s()] Internal error: port validation failed\n",
-			   __func__);
-		return -EFAULT;
-	}
-
-	dlb_dir_pool_update_credit_count(hw,
-					 dir_pool->id,
-					 args->dir_credit_high_watermark);
-
-	ret = dlb_dir_port_configure_cq(hw,
-					port,
-					pop_count_dma_base,
-					cq_dma_base,
-					args);
-
-	if (ret < 0)
-		return ret;
-
-	ret = dlb_dir_port_configure_pp(hw, domain, port, args);
-	if (ret < 0)
-		return ret;
-
-	dlb_dir_port_cq_enable(hw, port);
-
-	port->enabled = true;
-
-	port->port_configured = true;
-
-	return 0;
-}
-
-/**
- * dlb_hw_create_dir_port() - Allocate and initialize a DLB directed port and
- *	queue. The port/queue pair has the same ID and name.
- * @hw:	  Contains the current state of the DLB hardware.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_create_dir_port(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_dir_port_args *args,
-			   u64 pop_count_dma_base,
-			   u64 cq_dma_base,
-			   struct dlb_cmd_response *resp)
-{
-	struct dlb_dir_pq_pair *port;
-	struct dlb_domain *domain;
-	int ret;
-
-	dlb_log_create_dir_port_args(hw,
-				     domain_id,
-				     pop_count_dma_base,
-				     cq_dma_base,
-				     args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	if (dlb_verify_create_dir_port_args(hw,
-					    domain_id,
-					    pop_count_dma_base,
-					    cq_dma_base,
-					    args,
-					    resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (args->queue_id != -1)
-		port = dlb_get_domain_used_dir_pq(args->queue_id,
-						  domain);
-	else
-		port = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
-					 typeof(*port));
-
-	/* Verification should catch this. */
-	if (port == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available dir ports\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	ret = dlb_configure_dir_port(hw,
-				     domain,
-				     port,
-				     pop_count_dma_base,
-				     cq_dma_base,
-				     args);
-	if (ret < 0)
-		return ret;
-
-	/* Configuration succeeded, so move the resource from the 'avail' to
-	 * the 'used' list (if it's not already there).
-	 */
-	if (args->queue_id == -1) {
-		dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
-
-		dlb_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
-	}
-
-	resp->status = 0;
-	resp->id = port->id;
-
-	return 0;
-}
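
The DMA bases passed to dlb_hw_create_dir_port() must satisfy the 64B alignment
checks performed during verification, and resp->id carries the ID of the new
port/queue pair. The caller below is a hypothetical sketch only: the allocation
sizes, pool ID, and credit values are illustrative assumptions, it assumes the
domain has no load-balanced queues (so the ldb_credit_* fields are ignored),
and a real caller would pass an IOVA or PA rather than the virtual address used
here:

#include <errno.h>
#include <stdint.h>
#include <rte_malloc.h>

static int dlb_example_create_dir_port(struct dlb_hw *hw, u32 domain_id)
{
	struct dlb_create_dir_port_args args = {0};
	struct dlb_cmd_response resp = {0};
	void *cq_base, *pop_base;
	int ret;

	args.queue_id = -1;          /* take any free port/queue pair */
	args.cq_depth = 64;          /* power of two in [8, 1024] */
	args.dir_credit_pool_id = 0; /* assumed pre-configured pool */
	args.dir_credit_high_watermark = 32;
	args.dir_credit_low_watermark = 8;
	args.dir_credit_quantum = 8;

	/* Both bases must be 64B aligned; the pop count area spans two
	 * cache lines (128B). Sizes here are illustrative only.
	 */
	cq_base = rte_zmalloc("dir_cq", args.cq_depth * 16, 64);
	pop_base = rte_zmalloc("dir_pop", 128, 128);
	if (cq_base == NULL || pop_base == NULL)
		return -ENOMEM;

	ret = dlb_hw_create_dir_port(hw, domain_id, &args,
				     (u64)(uintptr_t)pop_base,
				     (u64)(uintptr_t)cq_base,
				     &resp);
	if (ret)
		return ret;

	return resp.id; /* ID of the new port/queue pair */
}
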
-
-static struct dlb_ldb_port *
-dlb_get_domain_used_ldb_port(u32 id, struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	struct dlb_ldb_port *port;
-	RTE_SET_USED(iter);
-
-	if (id >= DLB_MAX_NUM_LDB_PORTS)
-		return NULL;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
-		if (port->id == id)
-			return port;
-
-	DLB_DOM_LIST_FOR(domain->avail_ldb_ports, port, iter)
-		if (port->id == id)
-			return port;
-
-	return NULL;
-}
-
-static void
-dlb_log_pending_port_unmaps_args(struct dlb_hw *hw,
-				 struct dlb_pending_port_unmaps_args *args)
-{
-	DLB_HW_INFO(hw, "DLB pending port unmaps arguments:\n");
-	DLB_HW_INFO(hw, "\tPort ID: %d\n", args->port_id);
-}
-
-int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
-			       u32 domain_id,
-			       struct dlb_pending_port_unmaps_args *args,
-			       struct dlb_cmd_response *resp)
-{
-	struct dlb_domain *domain;
-	struct dlb_ldb_port *port;
-
-	dlb_log_pending_port_unmaps_args(hw, args);
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -EINVAL;
-	}
-
-	port = dlb_get_domain_used_ldb_port(args->port_id, domain);
-	if (port == NULL || !port->configured) {
-		resp->status = DLB_ST_INVALID_PORT_ID;
-		return -EINVAL;
-	}
-
-	resp->id = port->num_pending_removals;
-
-	return 0;
-}
-
-static void dlb_log_unmap_qid(struct dlb_hw *hw,
-			      u32 domain_id,
-			      struct dlb_unmap_qid_args *args)
-{
-	DLB_HW_INFO(hw, "DLB unmap QID arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID: %d\n",
-		    domain_id);
-	DLB_HW_INFO(hw, "\tPort ID:   %d\n",
-		    args->port_id);
-	DLB_HW_INFO(hw, "\tQueue ID:  %d\n",
-		    args->qid);
-	if (args->qid < DLB_MAX_NUM_LDB_QUEUES)
-		DLB_HW_INFO(hw, "\tQueue's num mappings:  %d\n",
-			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
-}
-
-static struct dlb_ldb_queue *dlb_get_domain_ldb_queue(u32 id,
-						      struct dlb_domain *domain)
-{
-	struct dlb_list_entry *iter;
-	struct dlb_ldb_queue *queue;
-	RTE_SET_USED(iter);
-
-	if (id >= DLB_MAX_NUM_LDB_QUEUES)
-		return NULL;
-
-	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
-		if (queue->id == id)
-			return queue;
-
-	return NULL;
-}
-
-static bool
-dlb_port_find_slot_with_pending_map_queue(struct dlb_ldb_port *port,
-					  struct dlb_ldb_queue *queue,
-					  int *slot)
-{
-	int i;
-
-	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		struct dlb_ldb_port_qid_map *map = &port->qid_map[i];
-
-		if (map->state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP &&
-		    map->pending_qid == queue->id)
-			break;
-	}
-
-	*slot = i;
-
-	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
-}
-
-static int dlb_verify_unmap_qid_args(struct dlb_hw *hw,
-				     u32 domain_id,
-				     struct dlb_unmap_qid_args *args,
-				     struct dlb_cmd_response *resp)
-{
-	enum dlb_qid_map_state state;
-	struct dlb_domain *domain;
-	struct dlb_ldb_port *port;
-	struct dlb_ldb_queue *queue;
-	int slot;
-	int id;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	id = args->port_id;
-
-	port = dlb_get_domain_used_ldb_port(id, domain);
-
-	if (port == NULL || !port->configured) {
-		resp->status = DLB_ST_INVALID_PORT_ID;
-		return -1;
-	}
-
-	if (port->domain_id != domain->id) {
-		resp->status = DLB_ST_INVALID_PORT_ID;
-		return -1;
-	}
-
-	queue = dlb_get_domain_ldb_queue(args->qid, domain);
-
-	if (queue == NULL || !queue->configured) {
-		DLB_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
-			   __func__, args->qid);
-		resp->status = DLB_ST_INVALID_QID;
-		return -1;
-	}
-
-	/* Verify that the port has the queue mapped. From the application's
-	 * perspective a queue is mapped if it is actually mapped, the map is
-	 * in progress, or the map is blocked pending an unmap.
-	 */
-	state = DLB_QUEUE_MAPPED;
-	if (dlb_port_find_slot_queue(port, state, queue, &slot))
-		return 0;
-
-	state = DLB_QUEUE_MAP_IN_PROGRESS;
-	if (dlb_port_find_slot_queue(port, state, queue, &slot))
-		return 0;
-
-	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &slot))
-		return 0;
-
-	resp->status = DLB_ST_INVALID_QID;
-	return -1;
-}
-
-int dlb_hw_unmap_qid(struct dlb_hw *hw,
-		     u32 domain_id,
-		     struct dlb_unmap_qid_args *args,
-		     struct dlb_cmd_response *resp)
-{
-	enum dlb_qid_map_state state;
-	struct dlb_ldb_queue *queue;
-	struct dlb_ldb_port *port;
-	struct dlb_domain *domain;
-	bool unmap_complete;
-	int i, ret, id;
-
-	dlb_log_unmap_qid(hw, domain_id, args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	if (dlb_verify_unmap_qid_args(hw, domain_id, args, resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	id = args->port_id;
-
-	port = dlb_get_domain_used_ldb_port(id, domain);
-	if (port == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: port not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	queue = dlb_get_domain_ldb_queue(args->qid, domain);
-	if (queue == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: queue not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	/* If the queue hasn't been mapped yet, we need to update the slot's
-	 * state and re-enable the queue's inflights.
-	 */
-	state = DLB_QUEUE_MAP_IN_PROGRESS;
-	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
-		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-			DLB_HW_ERR(hw,
-				   "[%s():%d] Internal error: port slot tracking failed\n",
-				   __func__, __LINE__);
-			return -EFAULT;
-		}
-
-		/* Since the in-progress map was aborted, re-enable the QID's
-		 * inflights.
-		 */
-		if (queue->num_pending_additions == 0)
-			dlb_ldb_queue_set_inflight_limit(hw, queue);
-
-		state = DLB_QUEUE_UNMAPPED;
-		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
-		if (ret)
-			return ret;
-
-		goto unmap_qid_done;
-	}
-
-	/* If the queue mapping is on hold pending an unmap, we simply need to
-	 * update the slot's state.
-	 */
-	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
-		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-			DLB_HW_ERR(hw,
-				   "[%s():%d] Internal error: port slot tracking failed\n",
-				   __func__, __LINE__);
-			return -EFAULT;
-		}
-
-		state = DLB_QUEUE_UNMAP_IN_PROGRESS;
-		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
-		if (ret)
-			return ret;
-
-		goto unmap_qid_done;
-	}
-
-	state = DLB_QUEUE_MAPPED;
-	if (!dlb_port_find_slot_queue(port, state, queue, &i)) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available CQ slots\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: port slot tracking failed\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	/* QID->CQ mapping removal is an asynchronous procedure. It requires
-	 * stopping the DLB from scheduling this CQ, draining all inflights
-	 * from the CQ, then unmapping the queue from the CQ. This function
-	 * simply marks the port as needing the queue unmapped, and (if
-	 * necessary) starts the unmapping worker thread.
-	 */
-	dlb_ldb_port_cq_disable(hw, port);
-
-	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
-	ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
-	if (ret)
-		return ret;
-
-	/* Attempt to finish the unmapping now, in case the port has no
-	 * outstanding inflights. If that's not the case, this will fail and
-	 * the unmapping will be completed at a later time.
-	 */
-	unmap_complete = dlb_domain_finish_unmap_port(hw, domain, port);
-
-	/* If the unmapping couldn't complete immediately, launch the worker
-	 * thread (if it isn't already launched) to finish it later.
-	 */
-	if (!unmap_complete && !os_worker_active(hw))
-		os_schedule_work(hw);
-
-unmap_qid_done:
-	resp->status = 0;
-
-	return 0;
-}
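
Because the unmap may complete asynchronously (the worker thread finishes it
once the CQ's inflights drain), a caller that needs to know when the QID is
actually detached can poll dlb_hw_pending_port_unmaps(), whose resp->id holds
the port's pending-removal count. A minimal sketch, assuming the caller is
permitted to busy-wait:

static int dlb_example_wait_for_unmaps(struct dlb_hw *hw,
				       u32 domain_id,
				       u32 port_id)
{
	struct dlb_pending_port_unmaps_args args = { .port_id = port_id };
	struct dlb_cmd_response resp = {0};
	int ret;

	do {
		ret = dlb_hw_pending_port_unmaps(hw, domain_id, &args, &resp);
		if (ret)
			return ret;
	} while (resp.id != 0); /* resp.id == num_pending_removals */

	return 0;
}
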
-
-static void dlb_log_map_qid(struct dlb_hw *hw,
-			    u32 domain_id,
-			    struct dlb_map_qid_args *args)
-{
-	DLB_HW_INFO(hw, "DLB map QID arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
-	DLB_HW_INFO(hw, "\tPort ID:   %d\n", args->port_id);
-	DLB_HW_INFO(hw, "\tQueue ID:  %d\n", args->qid);
-	DLB_HW_INFO(hw, "\tPriority:  %d\n", args->priority);
-}
-
-static int dlb_verify_map_qid_args(struct dlb_hw *hw,
-				   u32 domain_id,
-				   struct dlb_map_qid_args *args,
-				   struct dlb_cmd_response *resp)
-{
-	struct dlb_domain *domain;
-	struct dlb_ldb_port *port;
-	struct dlb_ldb_queue *queue;
-	int id;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	id = args->port_id;
-
-	port = dlb_get_domain_used_ldb_port(id, domain);
-
-	if (port  == NULL || !port->configured) {
-		resp->status = DLB_ST_INVALID_PORT_ID;
-		return -1;
-	}
-
-	if (args->priority >= DLB_QID_PRIORITIES) {
-		resp->status = DLB_ST_INVALID_PRIORITY;
-		return -1;
-	}
-
-	queue = dlb_get_domain_ldb_queue(args->qid, domain);
-
-	if (queue  == NULL || !queue->configured) {
-		resp->status = DLB_ST_INVALID_QID;
-		return -1;
-	}
-
-	if (queue->domain_id != domain->id) {
-		resp->status = DLB_ST_INVALID_QID;
-		return -1;
-	}
-
-	if (port->domain_id != domain->id) {
-		resp->status = DLB_ST_INVALID_PORT_ID;
-		return -1;
-	}
-
-	return 0;
-}
-
-static int dlb_verify_start_domain_args(struct dlb_hw *hw,
-					u32 domain_id,
-					struct dlb_cmd_response *resp)
-{
-	struct dlb_domain *domain;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	if (domain->started) {
-		resp->status = DLB_ST_DOMAIN_STARTED;
-		return -1;
-	}
-
-	return 0;
-}
-
-static int dlb_verify_map_qid_slot_available(struct dlb_ldb_port *port,
-					     struct dlb_ldb_queue *queue,
-					     struct dlb_cmd_response *resp)
-{
-	enum dlb_qid_map_state state;
-	int i;
-
-	/* Unused slot available? */
-	if (port->num_mappings < DLB_MAX_NUM_QIDS_PER_LDB_CQ)
-		return 0;
-
-	/* If the queue is already mapped (from the application's perspective),
-	 * this is simply a priority update.
-	 */
-	state = DLB_QUEUE_MAPPED;
-	if (dlb_port_find_slot_queue(port, state, queue, &i))
-		return 0;
-
-	state = DLB_QUEUE_MAP_IN_PROGRESS;
-	if (dlb_port_find_slot_queue(port, state, queue, &i))
-		return 0;
-
-	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i))
-		return 0;
-
-	/* If the slot contains an unmap in progress, it's considered
-	 * available.
-	 */
-	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
-	if (dlb_port_find_slot(port, state, &i))
-		return 0;
-
-	state = DLB_QUEUE_UNMAPPED;
-	if (dlb_port_find_slot(port, state, &i))
-		return 0;
-
-	resp->status = DLB_ST_NO_QID_SLOTS_AVAILABLE;
-	return -EINVAL;
-}
-
-static void dlb_ldb_port_change_qid_priority(struct dlb_hw *hw,
-					     struct dlb_ldb_port *port,
-					     int slot,
-					     struct dlb_map_qid_args *args)
-{
-	union dlb_lsp_cq2priov r0;
-
-	/* Read-modify-write the priority and valid bit register */
-	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port->id));
-
-	r0.field.v |= 1 << slot;
-	r0.field.prio |= (args->priority & 0x7) << slot * 3;
-
-	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r0.val);
-
-	dlb_flush_csr(hw);
-
-	port->qid_map[slot].priority = args->priority;
-}
-
-int dlb_hw_map_qid(struct dlb_hw *hw,
-		   u32 domain_id,
-		   struct dlb_map_qid_args *args,
-		   struct dlb_cmd_response *resp)
-{
-	enum dlb_qid_map_state state;
-	struct dlb_ldb_queue *queue;
-	struct dlb_ldb_port *port;
-	struct dlb_domain *domain;
-	int ret, i, id;
-	u8 prio;
-
-	dlb_log_map_qid(hw, domain_id, args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	if (dlb_verify_map_qid_args(hw, domain_id, args, resp))
-		return -EINVAL;
-
-	prio = args->priority;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	id = args->port_id;
-
-	port = dlb_get_domain_used_ldb_port(id, domain);
-	if (port == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: port not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	queue = dlb_get_domain_ldb_queue(args->qid, domain);
-	if (queue == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: queue not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	/* If there are any outstanding detach operations for this port,
-	 * attempt to complete them. This may be necessary to free up a QID
-	 * slot for this requested mapping.
-	 */
-	if (port->num_pending_removals)
-		dlb_domain_finish_unmap_port(hw, domain, port);
-
-	ret = dlb_verify_map_qid_slot_available(port, queue, resp);
-	if (ret)
-		return ret;
-
-	/* Hardware requires disabling the CQ before mapping QIDs. */
-	if (port->enabled)
-		dlb_ldb_port_cq_disable(hw, port);
-
-	/* If this is only a priority change, don't perform the full QID->CQ
-	 * mapping procedure
-	 */
-	state = DLB_QUEUE_MAPPED;
-	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
-		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-			DLB_HW_ERR(hw,
-				   "[%s():%d] Internal error: port slot tracking failed\n",
-				   __func__, __LINE__);
-			return -EFAULT;
-		}
-
-		if (prio != port->qid_map[i].priority) {
-			dlb_ldb_port_change_qid_priority(hw, port, i, args);
-			DLB_HW_INFO(hw, "DLB map: priority change only\n");
-		}
-
-		state = DLB_QUEUE_MAPPED;
-		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
-		if (ret)
-			return ret;
-
-		goto map_qid_done;
-	}
-
-	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
-	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
-		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-			DLB_HW_ERR(hw,
-				   "[%s():%d] Internal error: port slot tracking failed\n",
-				   __func__, __LINE__);
-			return -EFAULT;
-		}
-
-		if (prio != port->qid_map[i].priority) {
-			dlb_ldb_port_change_qid_priority(hw, port, i, args);
-			DLB_HW_INFO(hw, "DLB map: priority change only\n");
-		}
-
-		state = DLB_QUEUE_MAPPED;
-		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
-		if (ret)
-			return ret;
-
-		goto map_qid_done;
-	}
-
-	/* If this is a priority change on an in-progress mapping, don't
-	 * perform the full QID->CQ mapping procedure.
-	 */
-	state = DLB_QUEUE_MAP_IN_PROGRESS;
-	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
-		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-			DLB_HW_ERR(hw,
-				   "[%s():%d] Internal error: port slot tracking failed\n",
-				   __func__, __LINE__);
-			return -EFAULT;
-		}
-
-		port->qid_map[i].priority = prio;
-
-		DLB_HW_INFO(hw, "DLB map: priority change only\n");
-
-		goto map_qid_done;
-	}
-
-	/* If this is a priority change on a pending mapping, update the
-	 * pending priority
-	 */
-	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
-		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-			DLB_HW_ERR(hw,
-				   "[%s():%d] Internal error: port slot tracking failed\n",
-				   __func__, __LINE__);
-			return -EFAULT;
-		}
-
-		port->qid_map[i].pending_priority = prio;
-
-		DLB_HW_INFO(hw, "DLB map: priority change only\n");
-
-		goto map_qid_done;
-	}
-
-	/* If all the CQ's slots are in use, then there's an unmap in progress
-	 * (guaranteed by dlb_verify_map_qid_slot_available()), so add this
-	 * mapping to pending_map and return. When the removal is completed for
-	 * the slot's current occupant, this mapping will be performed.
-	 */
-	if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &i)) {
-		if (dlb_port_find_slot(port, DLB_QUEUE_UNMAP_IN_PROGRESS, &i)) {
-			enum dlb_qid_map_state state;
-
-			if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
-				DLB_HW_ERR(hw,
-					   "[%s():%d] Internal error: port slot tracking failed\n",
-					   __func__, __LINE__);
-				return -EFAULT;
-			}
-
-			port->qid_map[i].pending_qid = queue->id;
-			port->qid_map[i].pending_priority = prio;
-
-			state = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;
-
-			ret = dlb_port_slot_state_transition(hw, port, queue,
-							     i, state);
-			if (ret)
-				return ret;
-
-			DLB_HW_INFO(hw, "DLB map: map pending removal\n");
-
-			goto map_qid_done;
-		}
-	}
-
-	/* If the domain has started, a special "dynamic" CQ->queue mapping
-	 * procedure is required in order to safely update the CQ<->QID tables.
-	 * The "static" procedure cannot be used when traffic is flowing,
-	 * because the CQ<->QID tables cannot be updated atomically and the
-	 * scheduler won't see the new mapping unless the queue's if_status
-	 * changes, which isn't guaranteed.
-	 */
-	ret = dlb_ldb_port_map_qid(hw, domain, port, queue, prio);
-
-	/* If ret is less than zero, it's due to an internal error */
-	if (ret < 0)
-		return ret;
-
-map_qid_done:
-	if (port->enabled)
-		dlb_ldb_port_cq_enable(hw, port);
-
-	resp->status = 0;
-
-	return 0;
-}
-
-static void dlb_log_start_domain(struct dlb_hw *hw, u32 domain_id)
-{
-	DLB_HW_INFO(hw, "DLB start domain arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
-}
-
-static void dlb_ldb_pool_write_credit_count_reg(struct dlb_hw *hw,
-						u32 pool_id)
-{
-	union dlb_chp_ldb_pool_crd_cnt r0 = { {0} };
-	struct dlb_credit_pool *pool;
-
-	pool = &hw->rsrcs.ldb_credit_pools[pool_id];
-
-	r0.field.count = pool->avail_credits;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
-		   r0.val);
-}
-
-static void dlb_dir_pool_write_credit_count_reg(struct dlb_hw *hw,
-						u32 pool_id)
-{
-	union dlb_chp_dir_pool_crd_cnt r0 = { {0} };
-	struct dlb_credit_pool *pool;
-
-	pool = &hw->rsrcs.dir_credit_pools[pool_id];
-
-	r0.field.count = pool->avail_credits;
-
-	DLB_CSR_WR(hw,
-		   DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
-		   r0.val);
-}
-
-/**
- * dlb_hw_start_domain() - Lock the domain configuration
- * @hw:	  Contains the current state of the DLB hardware.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_start_domain(struct dlb_hw *hw,
-			u32 domain_id,
-			struct dlb_start_domain_args *arg,
-			struct dlb_cmd_response *resp)
-{
-	struct dlb_list_entry *iter;
-	struct dlb_dir_pq_pair *dir_queue;
-	struct dlb_ldb_queue *ldb_queue;
-	struct dlb_credit_pool *pool;
-	struct dlb_domain *domain;
-	RTE_SET_USED(arg);
-	RTE_SET_USED(iter);
-
-	dlb_log_start_domain(hw, domain_id);
-
-	if (dlb_verify_start_domain_args(hw, domain_id, resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	/* Write the domain's pool credit counts, which have been updated
-	 * during port configuration. The sum of the pool credit count plus
-	 * each producer port's credit count must equal the pool's credit
-	 * allocation *before* traffic is sent.
-	 */
-	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
-		dlb_ldb_pool_write_credit_count_reg(hw, pool->id);
-
-	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
-		dlb_dir_pool_write_credit_count_reg(hw, pool->id);
-
-	/* Enable load-balanced and directed queue write permissions for the
-	 * queues this domain owns. Without this, the DLB will drop all
-	 * incoming traffic to those queues.
-	 */
-	DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
-		union dlb_sys_ldb_vasqid_v r0 = { {0} };
-		unsigned int offs;
-
-		r0.field.vasqid_v = 1;
-
-		offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + ldb_queue->id;
-
-		DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
-	}
-
-	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
-		union dlb_sys_dir_vasqid_v r0 = { {0} };
-		unsigned int offs;
-
-		r0.field.vasqid_v = 1;
-
-		offs = domain->id * DLB_MAX_NUM_DIR_PORTS + dir_queue->id;
-
-		DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
-	}
-
-	dlb_flush_csr(hw);
-
-	domain->started = true;
-
-	resp->status = 0;
-
-	return 0;
-}
-
-static void dlb_log_get_dir_queue_depth(struct dlb_hw *hw,
-					u32 domain_id,
-					u32 queue_id)
-{
-	DLB_HW_INFO(hw, "DLB get directed queue depth:\n");
-	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
-	DLB_HW_INFO(hw, "\tQueue ID: %d\n", queue_id);
-}
-
-int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
-			       u32 domain_id,
-			       struct dlb_get_dir_queue_depth_args *args,
-			       struct dlb_cmd_response *resp)
-{
-	struct dlb_dir_pq_pair *queue;
-	struct dlb_domain *domain;
-	int id;
-
-	id = domain_id;
-
-	dlb_log_get_dir_queue_depth(hw, domain_id, args->queue_id);
-
-	domain = dlb_get_domain_from_id(hw, id);
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -EINVAL;
-	}
-
-	id = args->queue_id;
-
-	queue = dlb_get_domain_used_dir_pq(args->queue_id, domain);
-	if (queue == NULL) {
-		resp->status = DLB_ST_INVALID_QID;
-		return -EINVAL;
-	}
-
-	resp->id = dlb_dir_queue_depth(hw, queue);
-
-	return 0;
-}
-
-static void dlb_log_get_ldb_queue_depth(struct dlb_hw *hw,
-					u32 domain_id,
-					u32 queue_id)
-{
-	DLB_HW_INFO(hw, "DLB get load-balanced queue depth:\n");
-	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
-	DLB_HW_INFO(hw, "\tQueue ID: %d\n", queue_id);
-}
-
-int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
-			       u32 domain_id,
-			       struct dlb_get_ldb_queue_depth_args *args,
-			       struct dlb_cmd_response *resp)
-{
-	union dlb_lsp_qid_aqed_active_cnt r0;
-	union dlb_lsp_qid_atq_enqueue_cnt r1;
-	union dlb_lsp_qid_ldb_enqueue_cnt r2;
-	struct dlb_ldb_queue *queue;
-	struct dlb_domain *domain;
-
-	dlb_log_get_ldb_queue_depth(hw, domain_id, args->queue_id);
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (domain == NULL) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -EINVAL;
-	}
-
-	queue = dlb_get_domain_ldb_queue(args->queue_id, domain);
-	if (queue == NULL) {
-		resp->status = DLB_ST_INVALID_QID;
-		return -EINVAL;
-	}
-
-	r0.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
-
-	r1.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));
-
-	r2.val = DLB_CSR_RD(hw,
-			    DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
-
-	resp->id = r0.val + r1.val + r2.val;
-
-	return 0;
-}
diff --git a/drivers/event/dlb/pf/base/dlb_resource.h b/drivers/event/dlb/pf/base/dlb_resource.h
deleted file mode 100644
index 4f48b73fd..000000000
--- a/drivers/event/dlb/pf/base/dlb_resource.h
+++ /dev/null
@@ -1,876 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_RESOURCE_H
-#define __DLB_RESOURCE_H
-
-#include "dlb_hw_types.h"
-#include "dlb_osdep_types.h"
-
-/**
- * dlb_resource_init() - initialize the device
- * @hw: pointer to struct dlb_hw.
- *
- * This function initializes the device's software state (pointed to by the hw
- * argument) and programs global scheduling QoS registers. This function should
- * be called during driver initialization.
- *
- * The dlb_hw struct must be unique per DLB device and persist until the device
- * is reset.
- *
- * Return:
- * Returns 0 upon success, -1 otherwise.
- */
-int dlb_resource_init(struct dlb_hw *hw);
-
-/**
- * dlb_resource_free() - free device state memory
- * @hw: dlb_hw handle for a particular device.
- *
- * This function frees software state pointed to by dlb_hw. This function
- * should be called when resetting the device or unloading the driver.
- */
-void dlb_resource_free(struct dlb_hw *hw);
-
-/**
- * dlb_resource_reset() - reset in-use resources to their initial state
- * @hw: dlb_hw handle for a particular device.
- *
- * This function resets in-use resources, and makes them available for use.
- */
-void dlb_resource_reset(struct dlb_hw *hw);
-
-/**
- * dlb_hw_create_sched_domain() - create a scheduling domain
- * @hw: dlb_hw handle for a particular device.
- * @args: scheduling domain creation arguments.
- * @resp: response structure.
- *
- * This function creates a scheduling domain containing the resources specified
- * in args. The individual resources (queues, ports, credit pools) can be
- * configured after creating a scheduling domain.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the domain ID.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, or the requested domain name
- *	    is already in use.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_create_sched_domain(struct dlb_hw *hw,
-			       struct dlb_create_sched_domain_args *args,
-			       struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_create_ldb_pool() - create a load-balanced credit pool
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: credit pool creation arguments.
- * @resp: response structure.
- *
- * This function creates a load-balanced credit pool containing the number of
- * requested credits.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the pool ID.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, the domain is not configured,
- *	    or the domain has already been started.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_ldb_pool_args *args,
-			   struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_create_dir_pool() - create a directed credit pool
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: credit pool creation arguments.
- * @resp: response structure.
- *
- * This function creates a directed credit pool containing the number of
- * requested credits.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the pool ID.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, the domain is not configured,
- *	    or the domain has already been started.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_create_dir_pool(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_dir_pool_args *args,
-			   struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_create_ldb_queue() - create a load-balanced queue
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: queue creation arguments.
- * @resp: response structure.
- *
- * This function creates a load-balanced queue.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the queue ID.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, the domain is not configured,
- *	    the domain has already been started, or the requested queue name is
- *	    already in use.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
-			    u32 domain_id,
-			    struct dlb_create_ldb_queue_args *args,
-			    struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_create_dir_queue() - create a directed queue
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: queue creation arguments.
- * @resp: response structure.
- *
- * This function creates a directed queue.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the queue ID.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, the domain is not configured,
- *	    or the domain has already been started.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_create_dir_queue(struct dlb_hw *hw,
-			    u32 domain_id,
-			    struct dlb_create_dir_queue_args *args,
-			    struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_create_dir_port() - create a directed port
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: port creation arguments.
- * @pop_count_dma_base: base address of the pop count memory. This can be
- *			a PA or an IOVA.
- * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
- * @resp: response structure.
- *
- * This function creates a directed port.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the port ID.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
- *	    pool ID is invalid, a pointer address is not properly aligned, the
- *	    domain is not configured, or the domain has already been started.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_create_dir_port(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_dir_port_args *args,
-			   u64 pop_count_dma_base,
-			   u64 cq_dma_base,
-			   struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_create_ldb_port() - create a load-balanced port
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: port creation arguments.
- * @pop_count_dma_base: base address of the pop count memory. This can be
- *			 a PA or an IOVA.
- * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
- * @resp: response structure.
- *
- * This function creates a load-balanced port.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the port ID.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
- *	    pool ID is invalid, a pointer address is not properly aligned, the
- *	    domain is not configured, or the domain has already been started.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_create_ldb_port(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_ldb_port_args *args,
-			   u64 pop_count_dma_base,
-			   u64 cq_dma_base,
-			   struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_start_domain() - start a scheduling domain
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: start domain arguments.
- * @resp: response structure.
- *
- * This function starts a scheduling domain, which allows applications to send
- * traffic through it. Once a domain is started, its resources can no longer be
- * configured (besides QID remapping and port enable/disable).
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error.
- *
- * Errors:
- * EINVAL - the domain is not configured, or the domain is already started.
- */
-int dlb_hw_start_domain(struct dlb_hw *hw,
-			u32 domain_id,
-			struct dlb_start_domain_args *args,
-			struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_map_qid() - map a load-balanced queue to a load-balanced port
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: map QID arguments.
- * @resp: response structure.
- *
- * This function configures the DLB to schedule QEs from the specified queue to
- * the specified port. Each load-balanced port can be mapped to up to 8 queues;
- * each load-balanced queue can potentially map to all the load-balanced ports.
- *
- * A successful return does not necessarily mean the mapping was configured. If
- * this function is unable to immediately map the queue to the port, it will
- * add the requested operation to a per-port list of pending map/unmap
- * operations, and (if it's not already running) launch a kernel thread that
- * periodically attempts to process all pending operations. In a sense, this is
- * an asynchronous function.
- *
- * This asynchronicity creates two views of the state of hardware: the actual
- * hardware state and the requested state (as if every request completed
- * immediately). If there are any pending map/unmap operations, the requested
- * state will differ from the actual state. All validation is performed with
- * respect to the pending state; for instance, if there are 8 pending map
- * operations for port X, a request for a 9th will fail because a load-balanced
- * port can only map up to 8 queues.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
- *	    the domain is not configured.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_map_qid(struct dlb_hw *hw,
-		   u32 domain_id,
-		   struct dlb_map_qid_args *args,
-		   struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: unmap QID arguments.
- * @resp: response structure.
- *
- * This function configures the DLB to stop scheduling QEs from the specified
- * queue to the specified port.
- *
- * A successful return does not necessarily mean the mapping was removed. If
- * this function is unable to immediately unmap the queue from the port, it
- * will add the requested operation to a per-port list of pending map/unmap
- * operations, and (if it's not already running) launch a kernel thread that
- * periodically attempts to process all pending operations. See
- * dlb_hw_map_qid() for more details.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error.
- *
- * Errors:
- * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
- *	    the domain is not configured.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_unmap_qid(struct dlb_hw *hw,
-		     u32 domain_id,
-		     struct dlb_unmap_qid_args *args,
-		     struct dlb_cmd_response *resp);
-
-/**
- * dlb_finish_unmap_qid_procedures() - finish any pending unmap procedures
- * @hw: dlb_hw handle for a particular device.
- *
- * This function attempts to finish any outstanding unmap procedures.
- * This function should be called by the kernel thread responsible for
- * finishing map/unmap procedures.
- *
- * Return:
- * Returns the number of procedures that weren't completed.
- */
-unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw);
-
-/**
- * dlb_finish_map_qid_procedures() - finish any pending map procedures
- * @hw: dlb_hw handle for a particular device.
- *
- * This function attempts to finish any outstanding map procedures.
- * This function should be called by the kernel thread responsible for
- * finishing map/unmap procedures.
- *
- * Return:
- * Returns the number of procedures that weren't completed.
- */
-unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw);
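
Both "finish" helpers are meant to be driven from the background worker until
no procedures remain. A hedged sketch of such a loop (the worker function name
and the 1 ms retry interval are illustrative assumptions):

#include <unistd.h>

static void dlb_example_map_unmap_worker(struct dlb_hw *hw)
{
	unsigned int remaining;

	do {
		/* Finish unmaps first; a pending map may be waiting on
		 * the slot an unmap frees.
		 */
		remaining = dlb_finish_unmap_qid_procedures(hw);
		remaining += dlb_finish_map_qid_procedures(hw);

		if (remaining)
			usleep(1000);
	} while (remaining);
}
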
-
-/**
- * dlb_hw_enable_ldb_port() - enable a load-balanced port for scheduling
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: port enable arguments.
- * @resp: response structure.
- *
- * This function configures the DLB to schedule QEs to a load-balanced port.
- * Ports are enabled by default.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error.
- *
- * Errors:
- * EINVAL - The port ID is invalid or the domain is not configured.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_enable_ldb_port(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_enable_ldb_port_args *args,
-			   struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_disable_ldb_port() - disable a load-balanced port for scheduling
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: port disable arguments.
- * @resp: response structure.
- *
- * This function configures the DLB to stop scheduling QEs to a load-balanced
- * port. Ports are enabled by default.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error.
- *
- * Errors:
- * EINVAL - The port ID is invalid or the domain is not configured.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_disable_ldb_port(struct dlb_hw *hw,
-			    u32 domain_id,
-			    struct dlb_disable_ldb_port_args *args,
-			    struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_enable_dir_port() - enable a directed port for scheduling
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: port enable arguments.
- * @resp: response structure.
- *
- * This function configures the DLB to schedule QEs to a directed port.
- * Ports are enabled by default.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error.
- *
- * Errors:
- * EINVAL - The port ID is invalid or the domain is not configured.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_enable_dir_port(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_enable_dir_port_args *args,
-			   struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_disable_dir_port() - disable a directed port for scheduling
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: port disable arguments.
- * @resp: response structure.
- *
- * This function configures the DLB to stop scheduling QEs to a directed port.
- * Ports are enabled by default.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error.
- *
- * Errors:
- * EINVAL - The port ID is invalid or the domain is not configured.
- * EFAULT - Internal error (resp->status not set).
- */
-int dlb_hw_disable_dir_port(struct dlb_hw *hw,
-			    u32 domain_id,
-			    struct dlb_disable_dir_port_args *args,
-			    struct dlb_cmd_response *resp);
-
-/**
- * dlb_configure_ldb_cq_interrupt() - configure load-balanced CQ for interrupts
- * @hw: dlb_hw handle for a particular device.
- * @port_id: load-balanced port ID.
- * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
- *	    else a value up to 64.
- * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
- * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
- *	greater than 0.
- *
- * This function configures the DLB registers for a load-balanced CQ's interrupts.
- * This doesn't enable the CQ's interrupt; that can be done with
- * dlb_arm_cq_interrupt() or through an interrupt arm QE.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - The port ID is invalid.
- */
-int dlb_configure_ldb_cq_interrupt(struct dlb_hw *hw,
-				   int port_id,
-				   int vector,
-				   int mode,
-				   u16 threshold);
-
-/**
- * dlb_configure_dir_cq_interrupt() - configure directed CQ for interrupts
- * @hw: dlb_hw handle for a particular device.
- * @port_id: directed port ID.
- * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
- *	    else a value up to 64.
- * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
- * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
- *	greater than 0.
- *
- * This function configures the DLB registers for a directed CQ's interrupts.
- * This doesn't enable the CQ's interrupt; that can be done with
- * dlb_arm_cq_interrupt() or through an interrupt arm QE.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise.
- *
- * Errors:
- * EINVAL - The port ID is invalid.
- */
-int dlb_configure_dir_cq_interrupt(struct dlb_hw *hw,
-				   int port_id,
-				   int vector,
-				   int mode,
-				   u16 threshold);
-
-/**
- * dlb_enable_alarm_interrupts() - enable certain hardware alarm interrupts
- * @hw: dlb_hw handle for a particular device.
- *
- * This function enables the ingress error alarm interrupt. (Other alarms are
- * enabled by default.)
- */
-void dlb_enable_alarm_interrupts(struct dlb_hw *hw);
-
-/**
- * dlb_disable_alarm_interrupts() - disable certain hardware alarm interrupts
- * @hw: dlb_hw handle for a particular device.
- *
- * This function disables the ingress error alarm interrupt. (Other alarms are
- * disabled by default.)
- */
-void dlb_disable_alarm_interrupts(struct dlb_hw *hw);
-
-/**
- * dlb_set_msix_mode() - configure the hardware's MSI-X mode
- * @hw: dlb_hw handle for a particular device.
- * @mode: MSI-X mode (DLB_MSIX_MODE_PACKED or DLB_MSIX_MODE_COMPRESSED)
- *
- * This function configures the hardware to use either packed or compressed
- * mode. This function should not be called if using MSI interrupts.
- */
-void dlb_set_msix_mode(struct dlb_hw *hw, int mode);
-
-/**
- * dlb_arm_cq_interrupt() - arm a CQ's interrupt
- * @hw: dlb_hw handle for a particular device.
- * @port_id: port ID
- * @is_ldb: true for load-balanced port, false for a directed port
- *
- * This function arms the CQ's interrupt. The CQ must be configured prior to
- * calling this function.
- *
- * The function does no parameter validation; that is the caller's
- * responsibility.
- *
- * Return: returns 0 upon success, <0 otherwise.
- *
- * EINVAL - Invalid port ID.
- */
-int dlb_arm_cq_interrupt(struct dlb_hw *hw, int port_id, bool is_ldb);
-
-/**
- * dlb_read_compressed_cq_intr_status() - read compressed CQ interrupt status
- * @hw: dlb_hw handle for a particular device.
- * @ldb_interrupts: 2-entry array of u32 bitmaps
- * @dir_interrupts: 4-entry array of u32 bitmaps
- *
- * This function can be called from a compressed CQ interrupt handler to
- * determine which CQ interrupts have fired. The caller should take the
- * appropriate action (such as waking threads blocked on a CQ's interrupt) and
- * then ack the interrupts with dlb_ack_compressed_cq_intr().
- */
-void dlb_read_compressed_cq_intr_status(struct dlb_hw *hw,
-					u32 *ldb_interrupts,
-					u32 *dir_interrupts);
-
-/**
- * dlb_ack_compressed_cq_intr() - ack compressed CQ interrupts
- * @hw: dlb_hw handle for a particular device.
- * @ldb_interrupts: 2-entry array of u32 bitmaps
- * @dir_interrupts: 4-entry array of u32 bitmaps
- *
- * This function ACKs compressed CQ interrupts. Its arguments should be the
- * same ones passed to dlb_read_compressed_cq_intr_status().
- */
-void dlb_ack_compressed_cq_intr(struct dlb_hw *hw,
-				u32 *ldb_interrupts,
-				u32 *dir_interrupts);
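
A compressed-mode handler reads the two bitmaps, acts on the set bits, and then
acks them with the same arrays. A minimal sketch, assuming each bit position
maps to a CQ/port ID and that the wake callback is supplied by the caller (both
assumptions are illustrative, not part of this API):

#include <stdbool.h>

static void dlb_example_compressed_cq_isr(struct dlb_hw *hw,
					  void (*wake)(int id, bool is_ldb))
{
	u32 ldb_ints[2], dir_ints[4];
	int i, bit;

	dlb_read_compressed_cq_intr_status(hw, ldb_ints, dir_ints);

	for (i = 0; i < 2; i++)
		for (bit = 0; bit < 32; bit++)
			if (ldb_ints[i] & (1U << bit))
				wake(i * 32 + bit, true);

	for (i = 0; i < 4; i++)
		for (bit = 0; bit < 32; bit++)
			if (dir_ints[i] & (1U << bit))
				wake(i * 32 + bit, false);

	dlb_ack_compressed_cq_intr(hw, ldb_ints, dir_ints);
}
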
-
-/**
- * dlb_process_alarm_interrupt() - process an alarm interrupt
- * @hw: dlb_hw handle for a particular device.
- *
- * This function reads the alarm syndrome, logs it, and acks the interrupt.
- * This function should be called from the alarm interrupt handler when
- * interrupt vector DLB_INT_ALARM fires.
- */
-void dlb_process_alarm_interrupt(struct dlb_hw *hw);
-
-/**
- * dlb_process_ingress_error_interrupt() - process ingress error interrupts
- * @hw: dlb_hw handle for a particular device.
- *
- * This function reads the alarm syndrome, logs it, notifies user-space, and
- * acks the interrupt. This function should be called from the alarm interrupt
- * handler when interrupt vector DLB_INT_INGRESS_ERROR fires.
- */
-void dlb_process_ingress_error_interrupt(struct dlb_hw *hw);
-
-/**
- * dlb_get_group_sequence_numbers() - return a group's number of SNs per queue
- * @hw: dlb_hw handle for a particular device.
- * @group_id: sequence number group ID.
- *
- * This function returns the configured number of sequence numbers per queue
- * for the specified group.
- *
- * Return:
- * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
- */
-int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id);
-
-/**
- * dlb_get_group_sequence_number_occupancy() - return a group's in-use slots
- * @hw: dlb_hw handle for a particular device.
- * @group_id: sequence number group ID.
- *
- * This function returns the group's number of in-use slots (i.e. load-balanced
- * queues using the specified group).
- *
- * Return:
- * Returns -EINVAL if group_id is invalid, else the group's occupancy.
- */
-int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
-					    unsigned int group_id);
-
-/**
- * dlb_set_group_sequence_numbers() - assign a group's number of SNs per queue
- * @hw: dlb_hw handle for a particular device.
- * @group_id: sequence number group ID.
- * @val: requested amount of sequence numbers per queue.
- *
- * This function configures the group's number of sequence numbers per queue.
- * val can be a power-of-two between 32 and 1024, inclusive. This setting can
- * be configured until the first ordered load-balanced queue is configured, at
- * which point the configuration is locked.
- *
- * Return:
- * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
- * ordered queue is configured.
- */
-int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
-				   unsigned int group_id,
-				   unsigned long val);
-
-/**
- * dlb_reset_domain() - reset a scheduling domain
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- *
- * This function resets and frees a DLB scheduling domain and its associated
- * resources.
- *
- * Pre-condition: the driver must ensure software has stopped sending QEs
- * through this domain's producer ports before invoking this function, or
- * undefined behavior will result.
- *
- * Return:
- * Returns 0 upon success, -1 otherwise.
- *
- * EINVAL - Invalid domain ID, or the domain is not configured.
- * EFAULT - Internal error. (Possibly caused if software is the pre-condition
- *	    is not met.)
- * ETIMEDOUT - Hardware component didn't reset in the expected time.
- */
-int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id);
-
-/**
- * dlb_ldb_port_owned_by_domain() - query whether a port is owned by a domain
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @port_id: port ID.
- *
- * This function returns whether a load-balanced port is owned by a specified
- * domain.
- *
- * Return:
- * Returns 0 if false, 1 if true, <0 otherwise.
- *
- * EINVAL - Invalid domain or port ID, or the domain is not configured.
- */
-int dlb_ldb_port_owned_by_domain(struct dlb_hw *hw,
-				 u32 domain_id,
-				 u32 port_id);
-
-/**
- * dlb_dir_port_owned_by_domain() - query whether a port is owned by a domain
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @port_id: port ID.
- *
- * This function returns whether a directed port is owned by a specified
- * domain.
- *
- * Return:
- * Returns 0 if false, 1 if true, <0 otherwise.
- *
- * EINVAL - Invalid domain or port ID, or the domain is not configured.
- */
-int dlb_dir_port_owned_by_domain(struct dlb_hw *hw,
-				 u32 domain_id,
-				 u32 port_id);
-
-/**
- * dlb_hw_get_num_resources() - query the PCI function's available resources
- * @arg: pointer to resource counts.
- *
- * This function returns the number of available resources for the PF.
- */
-void dlb_hw_get_num_resources(struct dlb_hw *hw,
-			      struct dlb_get_num_resources_args *arg);
-
-/**
- * dlb_hw_get_num_used_resources() - query the PCI function's used resources
- * @arg: pointer to resource counts.
- *
- * This function returns the number of resources in use by the PF. It fills in
- * the fields that args points to, except the following:
- * - max_contiguous_atomic_inflights
- * - max_contiguous_hist_list_entries
- * - max_contiguous_ldb_credits
- * - max_contiguous_dir_credits
- */
-void dlb_hw_get_num_used_resources(struct dlb_hw *hw,
-				   struct dlb_get_num_resources_args *arg);
-
-/**
- * dlb_disable_dp_vasr_feature() - disable directed pipe VAS reset hardware
- * @hw: dlb_hw handle for a particular device.
- *
- * This function disables certain hardware in the directed pipe,
- * necessary to workaround a DLB VAS reset issue.
- */
-void dlb_disable_dp_vasr_feature(struct dlb_hw *hw);
-
-/**
- * dlb_enable_excess_tokens_alarm() - enable interrupts for the excess token
- * pop alarm
- * @hw: dlb_hw handle for a particular device.
- *
- * This function enables the PF ingress error alarm interrupt to fire when an
- * excess token pop occurs.
- */
-void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw);
-
-/**
- * dlb_disable_excess_tokens_alarm() - disable interrupts for the excess token
- * pop alarm
- * @hw: dlb_hw handle for a particular device.
- *
- * This function disables the PF ingress error alarm interrupt to fire when an
- * excess token pop occurs.
- */
-void dlb_disable_excess_tokens_alarm(struct dlb_hw *hw);
-
-/**
- * dlb_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: queue depth args
- *
- * This function returns the depth of a load-balanced queue.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the depth.
- *
- * Errors:
- * EINVAL - Invalid domain ID or queue ID.
- */
-int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
-			       u32 domain_id,
-			       struct dlb_get_ldb_queue_depth_args *args,
-			       struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_get_dir_queue_depth() - returns the depth of a directed queue
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: queue depth args
- *
- * This function returns the depth of a directed queue.
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the depth.
- *
- * Errors:
- * EINVAL - Invalid domain ID or queue ID.
- */
-int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
-			       u32 domain_id,
-			       struct dlb_get_dir_queue_depth_args *args,
-			       struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_pending_port_unmaps() - returns the number of unmap operations in
- *	progress for a load-balanced port.
- * @hw: dlb_hw handle for a particular device.
- * @domain_id: domain ID.
- * @args: number of unmaps in progress args
- *
- * Return:
- * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
- * assigned a detailed error code from enum dlb_error. If successful, resp->id
- * contains the number of unmaps in progress.
- *
- * Errors:
- * EINVAL - Invalid port ID.
- */
-int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
-			       u32 domain_id,
-			       struct dlb_pending_port_unmaps_args *args,
-			       struct dlb_cmd_response *resp);
-
-/**
- * dlb_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
- *	ports.
- * @hw: dlb_hw handle for a particular device.
- *
- * This function must be called prior to configuring scheduling domains.
- */
-void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw);
-
-/**
- * dlb_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports
- * @hw: dlb_hw handle for a particular device.
- *
- * This function must be called prior to configuring scheduling domains.
- */
-void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw);
-
-/**
- * dlb_hw_set_qe_arbiter_weights() - program QE arbiter weights
- * @hw: dlb_hw handle for a particular device.
- * @weight: 8-entry array of arbiter weights.
- *
- * weight[N] programs priority N's weight. In cases where the 8 priorities are
- * reduced to 4 bins, the mapping is:
- * - weight[1] programs bin 0
- * - weight[3] programs bin 1
- * - weight[5] programs bin 2
- * - weight[7] programs bin 3
- */
-void dlb_hw_set_qe_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
-
-/**
- * dlb_hw_set_qid_arbiter_weights() - program QID arbiter weights
- * @hw: dlb_hw handle for a particular device.
- * @weight: 8-entry array of arbiter weights.
- *
- * weight[N] programs priority N's weight. In cases where the 8 priorities are
- * reduced to 4 bins, the mapping is:
- * - weight[1] programs bin 0
- * - weight[3] programs bin 1
- * - weight[5] programs bin 2
- * - weight[7] programs bin 3
- */
-void dlb_hw_set_qid_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
-
-/**
- * dlb_hw_enable_pp_sw_alarms() - enable out-of-credit alarm for all producer
- * ports
- * @hw: dlb_hw handle for a particular device.
- */
-void dlb_hw_enable_pp_sw_alarms(struct dlb_hw *hw);
-
-/**
- * dlb_hw_disable_pp_sw_alarms() - disable out-of-credit alarm for all producer
- * ports
- * @hw: dlb_hw handle for a particular device.
- */
-void dlb_hw_disable_pp_sw_alarms(struct dlb_hw *hw);
-
-/**
- * dlb_hw_disable_pf_to_vf_isr_pend_err() - disable alarm triggered by PF
- *	access to VF's ISR pending register
- * @hw: dlb_hw handle for a particular device.
- */
-void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw);
-
-/**
- * dlb_hw_disable_vf_to_pf_isr_pend_err() - disable alarm triggered by VF
- *	access to PF's ISR pending register
- * @hw: dlb_hw handle for a particular device.
- */
-void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw);
-
-#endif /* __DLB_RESOURCE_H */
diff --git a/drivers/event/dlb/pf/dlb_main.c b/drivers/event/dlb/pf/dlb_main.c
deleted file mode 100644
index 264350e28..000000000
--- a/drivers/event/dlb/pf/dlb_main.c
+++ /dev/null
@@ -1,552 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <stdint.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <errno.h>
-#include <assert.h>
-#include <unistd.h>
-#include <string.h>
-
-#include <rte_malloc.h>
-#include <rte_errno.h>
-
-#include "base/dlb_resource.h"
-#include "base/dlb_osdep.h"
-#include "base/dlb_regs.h"
-#include "../dlb_priv.h"
-#include "../dlb_inline_fns.h"
-#include "../dlb_user.h"
-#include "dlb_main.h"
-
-unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
-
-#define DLB_PCI_CAP_POINTER 0x34
-#define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
-#define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
-#define DLB_PCI_ERR_UNCOR_MASK 8
-#define DLB_PCI_ERR_UNC_UNSUP  0x00100000
-
-#define DLB_PCI_LNKCTL 16
-#define DLB_PCI_SLTCTL 24
-#define DLB_PCI_RTCTL 28
-#define DLB_PCI_EXP_DEVCTL2 40
-#define DLB_PCI_LNKCTL2 48
-#define DLB_PCI_SLTCTL2 56
-#define DLB_PCI_CMD 4
-#define DLB_PCI_EXP_DEVSTA 10
-#define DLB_PCI_EXP_DEVSTA_TRPND 0x20
-#define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000
-
-#define DLB_PCI_CAP_ID_EXP       0x10
-#define DLB_PCI_CAP_ID_MSIX      0x11
-#define DLB_PCI_EXT_CAP_ID_PRI   0x13
-#define DLB_PCI_EXT_CAP_ID_ACS   0xD
-
-#define DLB_PCI_PRI_CTRL_ENABLE         0x1
-#define DLB_PCI_PRI_ALLOC_REQ           0xC
-#define DLB_PCI_PRI_CTRL                0x4
-#define DLB_PCI_MSIX_FLAGS              0x2
-#define DLB_PCI_MSIX_FLAGS_ENABLE       0x8000
-#define DLB_PCI_MSIX_FLAGS_MASKALL      0x4000
-#define DLB_PCI_ERR_ROOT_STATUS         0x30
-#define DLB_PCI_ERR_COR_STATUS          0x10
-#define DLB_PCI_ERR_UNCOR_STATUS        0x4
-#define DLB_PCI_COMMAND_INTX_DISABLE    0x400
-#define DLB_PCI_ACS_CAP                 0x4
-#define DLB_PCI_ACS_CTRL                0x6
-#define DLB_PCI_ACS_SV                  0x1
-#define DLB_PCI_ACS_RR                  0x4
-#define DLB_PCI_ACS_CR                  0x8
-#define DLB_PCI_ACS_UF                  0x10
-#define DLB_PCI_ACS_EC                  0x20
-
-static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
-{
-	uint8_t pos;
-	int ret;
-	uint16_t hdr;
-
-	ret = rte_pci_read_config(pdev, &pos, 1, DLB_PCI_CAP_POINTER);
-	pos &= 0xFC;
-
-	if (ret != 1)
-		return -1;
-
-	while (pos > 0x3F) {
-		ret = rte_pci_read_config(pdev, &hdr, 2, pos);
-		if (ret != 2)
-			return -1;
-
-		if (DLB_PCI_CAP_ID(hdr) == id)
-			return pos;
-
-		if (DLB_PCI_CAP_ID(hdr) == 0xFF)
-			return -1;
-
-		pos = DLB_PCI_CAP_NEXT(hdr);
-	}
-
-	return -1;
-}
-
-static int dlb_mask_ur_err(struct rte_pci_device *pdev)
-{
-	uint32_t mask;
-	size_t sz = sizeof(mask);
-	int pos = rte_pci_find_ext_capability(pdev, RTE_PCI_EXT_CAP_ID_ERR);
-
-	if (pos < 0) {
-		DLB_LOG_ERR("[%s()] failed to find the aer capability\n",
-		       __func__);
-		return pos;
-	}
-
-	pos += DLB_PCI_ERR_UNCOR_MASK;
-
-	if (rte_pci_read_config(pdev, &mask, sz, pos) != (int)sz) {
-		DLB_LOG_ERR("[%s()] Failed to read uncorrectable error mask reg\n",
-		       __func__);
-		return -1;
-	}
-
-	/* Mask Unsupported Request errors */
-	mask |= DLB_PCI_ERR_UNC_UNSUP;
-
-	if (rte_pci_write_config(pdev, &mask, sz, pos) != (int)sz) {
-		DLB_LOG_ERR("[%s()] Failed to write uncorrectable error mask reg at offset %d\n",
-		       __func__, pos);
-		return -1;
-	}
-
-	return 0;
-}
-
-struct dlb_dev *
-dlb_probe(struct rte_pci_device *pdev)
-{
-	struct dlb_dev *dlb_dev;
-	int ret = 0;
-
-	DLB_INFO(dlb_dev, "probe\n");
-
-	dlb_dev = rte_malloc("DLB_PF", sizeof(struct dlb_dev),
-			     RTE_CACHE_LINE_SIZE);
-
-	if (dlb_dev == NULL) {
-		ret = -ENOMEM;
-		goto dlb_dev_malloc_fail;
-	}
-
-	/* PCI Bus driver has already mapped bar space into process.
-	 * Save off our IO register and FUNC addresses.
-	 */
-
-	/* BAR 0 */
-	if (pdev->mem_resource[0].addr == NULL) {
-		DLB_ERR(dlb_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
-		ret = -EINVAL;
-		goto pci_mmap_bad_addr;
-	}
-	dlb_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
-	dlb_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
-
-	DLB_INFO(dlb_dev, "DLB FUNC VA=%p, PA=%p, len=%"PRIu64"\n",
-		 (void *)dlb_dev->hw.func_kva,
-		 (void *)dlb_dev->hw.func_phys_addr,
-		 pdev->mem_resource[0].len);
-
-	/* BAR 2 */
-	if (pdev->mem_resource[2].addr == NULL) {
-		DLB_ERR(dlb_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
-		ret = -EINVAL;
-		goto pci_mmap_bad_addr;
-	}
-	dlb_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
-	dlb_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
-
-	DLB_INFO(dlb_dev, "DLB CSR VA=%p, PA=%p, len=%"PRIu64"\n",
-		 (void *)dlb_dev->hw.csr_kva,
-		 (void *)dlb_dev->hw.csr_phys_addr,
-		 pdev->mem_resource[2].len);
-
-	dlb_dev->pdev = pdev;
-
-	ret = dlb_pf_reset(dlb_dev);
-	if (ret)
-		goto dlb_reset_fail;
-
-	/* DLB incorrectly sends URs in response to certain messages. Mask UR
-	 * errors to prevent these from being propagated to the MCA.
-	 */
-	ret = dlb_mask_ur_err(pdev);
-	if (ret)
-		goto mask_ur_err_fail;
-
-	ret = dlb_pf_init_driver_state(dlb_dev);
-	if (ret)
-		goto init_driver_state_fail;
-
-	ret = dlb_resource_init(&dlb_dev->hw);
-	if (ret)
-		goto resource_init_fail;
-
-	dlb_dev->revision = os_get_dev_revision(&dlb_dev->hw);
-
-	dlb_pf_init_hardware(dlb_dev);
-
-	return dlb_dev;
-
-resource_init_fail:
-	dlb_resource_free(&dlb_dev->hw);
-init_driver_state_fail:
-mask_ur_err_fail:
-dlb_reset_fail:
-pci_mmap_bad_addr:
-	rte_free(dlb_dev);
-dlb_dev_malloc_fail:
-	rte_errno = ret;
-	return NULL;
-}
-
-int
-dlb_pf_reset(struct dlb_dev *dlb_dev)
-{
-	int msix_cap_offset, err_cap_offset, acs_cap_offset, wait_count;
-	uint16_t dev_ctl_word, dev_ctl2_word, lnk_word, lnk_word2;
-	uint16_t rt_ctl_word, pri_ctrl_word;
-	struct rte_pci_device *pdev = dlb_dev->pdev;
-	uint16_t devsta_busy_word, devctl_word;
-	int pcie_cap_offset, pri_cap_offset;
-	uint16_t slt_word, slt_word2, cmd;
-	int ret = 0, i = 0;
-	uint32_t dword[16], pri_reqs_dword;
-	off_t off;
-
-	/* Save PCI config state */
-
-	for (i = 0; i < 16; i++) {
-		if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
-			return ret;
-	}
-
-	pcie_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_EXP);
-
-	if (pcie_cap_offset < 0) {
-		DLB_LOG_ERR("[%s()] failed to find the pcie capability\n",
-		       __func__);
-		return pcie_cap_offset;
-	}
-
-	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
-	if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
-		dev_ctl_word = 0;
-
-	off = pcie_cap_offset + DLB_PCI_LNKCTL;
-	if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
-		lnk_word = 0;
-
-	off = pcie_cap_offset + DLB_PCI_SLTCTL;
-	if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
-		slt_word = 0;
-
-	off = pcie_cap_offset + DLB_PCI_RTCTL;
-	if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
-		rt_ctl_word = 0;
-
-	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
-	if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
-		dev_ctl2_word = 0;
-
-	off = pcie_cap_offset + DLB_PCI_LNKCTL2;
-	if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
-		lnk_word2 = 0;
-
-	off = pcie_cap_offset + DLB_PCI_SLTCTL2;
-	if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
-		slt_word2 = 0;
-
-	pri_cap_offset = rte_pci_find_ext_capability(pdev,
-						     DLB_PCI_EXT_CAP_ID_PRI);
-	if (pri_cap_offset >= 0) {
-		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
-		if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
-			pri_reqs_dword = 0;
-	}
-
-	/* clear the PCI command register before issuing the FLR */
-
-	off = DLB_PCI_CMD;
-	cmd = 0;
-	if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
-		DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-		       __func__, (int)off);
-		return -1;
-	}
-
-	/* issue the FLR */
-	for (wait_count = 0; wait_count < 4; wait_count++) {
-		int sleep_time;
-
-		off = pcie_cap_offset + DLB_PCI_EXP_DEVSTA;
-		ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
-		if (ret != 2) {
-			DLB_LOG_ERR("[%s()] failed to read the pci device status\n",
-			       __func__);
-			return ret;
-		}
-
-		if (!(devsta_busy_word & DLB_PCI_EXP_DEVSTA_TRPND))
-			break;
-
-		sleep_time = (1 << (wait_count)) * 100;
-		rte_delay_ms(sleep_time);
-	}
-
-	if (wait_count == 4) {
-		DLB_LOG_ERR("[%s()] wait for pci pending transactions timed out\n",
-		       __func__);
-		return -1;
-	}
-
-	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
-	ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
-	if (ret != 2) {
-		DLB_LOG_ERR("[%s()] failed to read the pcie device control\n",
-		       __func__);
-		return ret;
-	}
-
-	devctl_word |= DLB_PCI_EXP_DEVCTL_BCR_FLR;
-
-	if (rte_pci_write_config(pdev, &devctl_word, 2, off) != 2) {
-		DLB_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
-		       __func__, (int)off);
-		return -1;
-	}
-
-	rte_delay_ms(100);
-
-	/* Restore PCI config state */
-
-	if (pcie_cap_offset >= 0) {
-		off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
-		if (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = pcie_cap_offset + DLB_PCI_LNKCTL;
-		if (rte_pci_write_config(pdev, &lnk_word, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = pcie_cap_offset + DLB_PCI_SLTCTL;
-		if (rte_pci_write_config(pdev, &slt_word, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = pcie_cap_offset + DLB_PCI_RTCTL;
-		if (rte_pci_write_config(pdev, &rt_ctl_word, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
-		if (rte_pci_write_config(pdev, &dev_ctl2_word, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = pcie_cap_offset + DLB_PCI_LNKCTL2;
-		if (rte_pci_write_config(pdev, &lnk_word2, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = pcie_cap_offset + DLB_PCI_SLTCTL2;
-		if (rte_pci_write_config(pdev, &slt_word2, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-	}
-
-	if (pri_cap_offset >= 0) {
-		pri_ctrl_word = DLB_PCI_PRI_CTRL_ENABLE;
-
-		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
-		if (rte_pci_write_config(pdev, &pri_reqs_dword, 4, off) != 4) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = pri_cap_offset + DLB_PCI_PRI_CTRL;
-		if (rte_pci_write_config(pdev, &pri_ctrl_word, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-	}
-
-	err_cap_offset = rte_pci_find_ext_capability(pdev,
-						     RTE_PCI_EXT_CAP_ID_ERR);
-	if (err_cap_offset >= 0) {
-		uint32_t tmp;
-
-		off = err_cap_offset + DLB_PCI_ERR_ROOT_STATUS;
-		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
-			tmp = 0;
-
-		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = err_cap_offset + DLB_PCI_ERR_COR_STATUS;
-		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
-			tmp = 0;
-
-		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = err_cap_offset + DLB_PCI_ERR_UNCOR_STATUS;
-		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
-			tmp = 0;
-
-		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-	}
-
-	for (i = 16; i > 0; i--) {
-		off = (i - 1) * 4;
-		if (rte_pci_write_config(pdev, &dword[i - 1], 4, off) != 4) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-	}
-
-	off = DLB_PCI_CMD;
-	if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
-		cmd &= ~DLB_PCI_COMMAND_INTX_DISABLE;
-		if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space\n",
-			       __func__);
-			return -1;
-		}
-	}
-
-	msix_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);
-	if (msix_cap_offset >= 0) {
-		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
-		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
-			cmd |= DLB_PCI_MSIX_FLAGS_ENABLE;
-			cmd |= DLB_PCI_MSIX_FLAGS_MASKALL;
-			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
-				DLB_LOG_ERR("[%s()] failed to write msix flags\n",
-				       __func__);
-				return -1;
-			}
-		}
-
-		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
-		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
-			cmd &= ~DLB_PCI_MSIX_FLAGS_MASKALL;
-			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
-				DLB_LOG_ERR("[%s()] failed to write msix flags\n",
-				       __func__);
-				return -1;
-			}
-		}
-	}
-
-	acs_cap_offset = rte_pci_find_ext_capability(pdev,
-						     DLB_PCI_EXT_CAP_ID_ACS);
-	if (acs_cap_offset >= 0) {
-		uint16_t acs_cap, acs_ctrl, acs_mask;
-		off = acs_cap_offset + DLB_PCI_ACS_CAP;
-		if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
-			acs_cap = 0;
-
-		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
-		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
-			acs_ctrl = 0;
-
-		acs_mask = DLB_PCI_ACS_SV | DLB_PCI_ACS_RR;
-		acs_mask |= (DLB_PCI_ACS_CR | DLB_PCI_ACS_UF);
-		acs_ctrl |= (acs_cap & acs_mask);
-
-		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-
-		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
-		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
-			acs_ctrl = 0;
-
-		acs_mask = DLB_PCI_ACS_RR | DLB_PCI_ACS_CR | DLB_PCI_ACS_EC;
-		acs_ctrl &= ~acs_mask;
-
-		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
-		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
-			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
-			       __func__, (int)off);
-			return -1;
-		}
-	}
-
-	return 0;
-}
-
-/*******************************/
-/****** Driver management ******/
-/*******************************/
-
-int
-dlb_pf_init_driver_state(struct dlb_dev *dlb_dev)
-{
-	/* Initialize software state */
-	rte_spinlock_init(&dlb_dev->resource_mutex);
-	rte_spinlock_init(&dlb_dev->measurement_lock);
-
-	return 0;
-}
-
-void
-dlb_pf_init_hardware(struct dlb_dev *dlb_dev)
-{
-	dlb_disable_dp_vasr_feature(&dlb_dev->hw);
-
-	dlb_enable_excess_tokens_alarm(&dlb_dev->hw);
-
-	if (dlb_dev->revision >= DLB_REV_B0) {
-		dlb_hw_enable_sparse_ldb_cq_mode(&dlb_dev->hw);
-		dlb_hw_enable_sparse_dir_cq_mode(&dlb_dev->hw);
-	}
-
-	if (dlb_dev->revision >= DLB_REV_B0) {
-		dlb_hw_disable_pf_to_vf_isr_pend_err(&dlb_dev->hw);
-		dlb_hw_disable_vf_to_pf_isr_pend_err(&dlb_dev->hw);
-	}
-}
diff --git a/drivers/event/dlb/pf/dlb_main.h b/drivers/event/dlb/pf/dlb_main.h
deleted file mode 100644
index 22e215223..000000000
--- a/drivers/event/dlb/pf/dlb_main.h
+++ /dev/null
@@ -1,47 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef __DLB_MAIN_H
-#define __DLB_MAIN_H
-
-#include <rte_debug.h>
-#include <rte_log.h>
-#include <rte_spinlock.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-
-#ifndef PAGE_SIZE
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-#endif
-
-#include "base/dlb_hw_types.h"
-#include "../dlb_user.h"
-
-#define DLB_DEFAULT_UNREGISTER_TIMEOUT_S 5
-
-struct dlb_dev {
-	struct rte_pci_device *pdev;
-	struct dlb_hw hw;
-	/* struct list_head list; */
-	struct device *dlb_device;
-	bool domain_reset_failed;
-	/* The resource mutex serializes access to driver data structures and
-	 * hardware registers.
-	 */
-	rte_spinlock_t resource_mutex;
-	rte_spinlock_t measurement_lock;
-	bool worker_launched;
-	u8 revision;
-};
-
-struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);
-void dlb_reset_done(struct dlb_dev *dlb_dev);
-
-/* pf_ops */
-int dlb_pf_init_driver_state(struct dlb_dev *dev);
-void dlb_pf_free_driver_state(struct dlb_dev *dev);
-void dlb_pf_init_hardware(struct dlb_dev *dev);
-int dlb_pf_reset(struct dlb_dev *dlb_dev);
-
-#endif /* __DLB_MAIN_H */
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
deleted file mode 100644
index 3aeef6f91..000000000
--- a/drivers/event/dlb/pf/dlb_pf.c
+++ /dev/null
@@ -1,752 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <stdint.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <sys/fcntl.h>
-#include <sys/time.h>
-#include <errno.h>
-#include <assert.h>
-#include <unistd.h>
-#include <string.h>
-#include <rte_debug.h>
-#include <rte_log.h>
-#include <rte_dev.h>
-#include <rte_devargs.h>
-#include <rte_mbuf.h>
-#include <rte_ring.h>
-#include <rte_errno.h>
-#include <rte_kvargs.h>
-#include <rte_malloc.h>
-#include <rte_cycles.h>
-#include <rte_io.h>
-#include <rte_memory.h>
-#include <rte_string_fns.h>
-
-#include "../dlb_priv.h"
-#include "../dlb_iface.h"
-#include "../dlb_inline_fns.h"
-#include "dlb_main.h"
-#include "base/dlb_hw_types.h"
-#include "base/dlb_osdep.h"
-#include "base/dlb_resource.h"
-
-static void
-dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
-{
-	int i;
-
-	/* Addresses will be initialized at port create */
-	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
-		/* First directed ports */
-
-		/* producer port */
-		dlb_port[i][DLB_DIR].pp_addr = NULL;
-
-		/* popcount */
-		dlb_port[i][DLB_DIR].ldb_popcount = NULL;
-		dlb_port[i][DLB_DIR].dir_popcount = NULL;
-
-		/* consumer queue */
-		dlb_port[i][DLB_DIR].cq_base = NULL;
-		dlb_port[i][DLB_DIR].mmaped = true;
-
-		/* Now load balanced ports */
-
-		/* producer port */
-		dlb_port[i][DLB_LDB].pp_addr = NULL;
-
-		/* popcount */
-		dlb_port[i][DLB_LDB].ldb_popcount = NULL;
-		dlb_port[i][DLB_LDB].dir_popcount = NULL;
-
-		/* consumer queue */
-		dlb_port[i][DLB_LDB].cq_base = NULL;
-		dlb_port[i][DLB_LDB].mmaped = true;
-	}
-}
-
-static int
-dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
-{
-	RTE_SET_USED(handle);
-	RTE_SET_USED(name);
-
-	return 0;
-}
-
-static void
-dlb_pf_domain_close(struct dlb_eventdev *dlb)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
-	int ret;
-
-	ret = dlb_reset_domain(&dlb_dev->hw, dlb->qm_instance.domain_id);
-	if (ret)
-		DLB_LOG_ERR("dlb_pf_reset_domain err %d", ret);
-}
-
-static int
-dlb_pf_get_device_version(struct dlb_hw_dev *handle,
-			  uint8_t *revision)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-
-	*revision = dlb_dev->revision;
-
-	return 0;
-}
-
-static int
-dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
-			 struct dlb_get_num_resources_args *rsrcs)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-
-	dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs);
-
-	return 0;
-}
-
-static int
-dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
-			   struct dlb_create_sched_domain_args *arg)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	if (dlb_dev->domain_reset_failed) {
-		response.status = DLB_ST_DOMAIN_RESET_FAILED;
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response);
-	if (ret)
-		goto done;
-
-done:
-
-	*(struct dlb_cmd_response *)arg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
-			      struct dlb_create_ldb_pool_args *cfg)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
-				     handle->domain_id,
-				     cfg,
-				     &response);
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
-			      struct dlb_create_dir_pool_args *cfg)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
-				     handle->domain_id,
-				     cfg,
-				     &response);
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
-			enum dlb_cq_poll_modes *mode)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-
-	if (dlb_dev->revision >= DLB_REV_B0)
-		*mode = DLB_CQ_POLL_MODE_SPARSE;
-	else
-		*mode = DLB_CQ_POLL_MODE_STD;
-
-	return 0;
-}
-
-static int
-dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
-			struct dlb_create_ldb_queue_args *cfg)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
-				      handle->domain_id,
-				      cfg,
-				      &response);
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
-			struct dlb_create_dir_queue_args *cfg)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
-				      handle->domain_id,
-				      cfg,
-				      &response);
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static void *
-dlb_alloc_coherent_aligned(const struct rte_memzone **mz, rte_iova_t *phys,
-			   size_t size, int align)
-{
-	char mz_name[RTE_MEMZONE_NAMESIZE];
-	uint32_t core_id = rte_lcore_id();
-	unsigned int socket_id;
-
-	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb_port_mem_%lx",
-		 (unsigned long)rte_get_timer_cycles());
-	if (core_id == (unsigned int)LCORE_ID_ANY)
-		core_id = rte_get_main_lcore();
-	socket_id = rte_lcore_to_socket_id(core_id);
-	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
-					 RTE_MEMZONE_IOVA_CONTIG, align);
-	if (*mz == NULL) {
-		DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes\n",
-			    size);
-		*phys = 0;
-		return NULL;
-	}
-	*phys = (*mz)->iova;
-	return (*mz)->addr;
-}
-
-static int
-dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
-		       struct dlb_create_ldb_port_args *cfg,
-		       enum dlb_cq_poll_modes poll_mode)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-	uint8_t *port_base;
-	const struct rte_memzone *mz;
-	int alloc_sz, qe_sz, cq_alloc_depth;
-	rte_iova_t pp_dma_base;
-	rte_iova_t pc_dma_base;
-	rte_iova_t cq_dma_base;
-	int is_dir = false;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	if (poll_mode == DLB_CQ_POLL_MODE_STD)
-		qe_sz = sizeof(struct dlb_dequeue_qe);
-	else
-		qe_sz = RTE_CACHE_LINE_SIZE;
-
-	/* The hardware always uses a CQ depth of at least
-	 * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
-	 * perspective we support a depth as low as 1 for LDB ports.
-	 */
-	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);
-
-	/* Calculate the port memory required, including two cache lines for
-	 * credit pop counts. Round up to the nearest cache line.
-	 */
-	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
-	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
-
-	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
-					       alloc_sz, PAGE_SIZE);
-	if (port_base == NULL)
-		return -ENOMEM;
-
-	/* Lock the page in memory */
-	ret = rte_mem_lock_page(port_base);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
-		goto create_port_err;
-	}
-
-	memset(port_base, 0, alloc_sz);
-	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
-
-	ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
-				     handle->domain_id,
-				     cfg,
-				     pc_dma_base,
-				     cq_dma_base,
-				     &response);
-	if (ret)
-		goto create_port_err;
-
-	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
-	dlb_port[response.id][DLB_LDB].pp_addr =
-		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
-
-	dlb_port[response.id][DLB_LDB].cq_base =
-		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
-
-	dlb_port[response.id][DLB_LDB].ldb_popcount =
-		(void *)(uintptr_t)port_base;
-	dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
-		(port_base + RTE_CACHE_LINE_SIZE);
-	dlb_port[response.id][DLB_LDB].mz = mz;
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-	return 0;
-
-create_port_err:
-
-	rte_memzone_free(mz);
-
-	return ret;
-}
-
-static int
-dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
-		       struct dlb_create_dir_port_args *cfg,
-		       enum dlb_cq_poll_modes poll_mode)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-	uint8_t *port_base;
-	const struct rte_memzone *mz;
-	int alloc_sz, qe_sz;
-	rte_iova_t pp_dma_base;
-	rte_iova_t pc_dma_base;
-	rte_iova_t cq_dma_base;
-	int is_dir = true;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	if (poll_mode == DLB_CQ_POLL_MODE_STD)
-		qe_sz = sizeof(struct dlb_dequeue_qe);
-	else
-		qe_sz = RTE_CACHE_LINE_SIZE;
-
-	/* Calculate the port memory required, including two cache lines for
-	 * credit pop counts. Round up to the nearest cache line.
-	 */
-	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
-	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
-
-	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
-					       alloc_sz, PAGE_SIZE);
-	if (port_base == NULL)
-		return -ENOMEM;
-
-	/* Lock the page in memory */
-	ret = rte_mem_lock_page(port_base);
-	if (ret < 0) {
-		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
-		goto create_port_err;
-	}
-
-	memset(port_base, 0, alloc_sz);
-	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
-
-	ret = dlb_hw_create_dir_port(&dlb_dev->hw,
-				     handle->domain_id,
-				     cfg,
-				     pc_dma_base,
-				     cq_dma_base,
-				     &response);
-	if (ret)
-		goto create_port_err;
-
-	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
-	dlb_port[response.id][DLB_DIR].pp_addr =
-		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
-
-	dlb_port[response.id][DLB_DIR].cq_base =
-		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
-
-	dlb_port[response.id][DLB_DIR].ldb_popcount =
-		(void *)(uintptr_t)port_base;
-	dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
-		(port_base + RTE_CACHE_LINE_SIZE);
-	dlb_port[response.id][DLB_DIR].mz = mz;
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-	return 0;
-
-create_port_err:
-
-	rte_memzone_free(mz);
-
-	return ret;
-}
-
-static int
-dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
-			 struct dlb_get_sn_allocation_args *args)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);
-
-	response.id = ret;
-	response.status = 0;
-
-	*(struct dlb_cmd_response *)args->response = response;
-
-	return ret;
-}
-
-static int
-dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
-			 struct dlb_set_sn_allocation_args *args)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
-					     args->num);
-
-	response.status = 0;
-
-	*(struct dlb_cmd_response *)args->response = response;
-
-	return ret;
-}
-
-static int
-dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
-			struct dlb_get_sn_occupancy_args *args)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
-						      args->group);
-
-	response.id = ret;
-	response.status = 0;
-
-	*(struct dlb_cmd_response *)args->response = response;
-
-	return ret;
-}
-
-static int
-dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,
-			  struct dlb_start_domain_args *cfg)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_start_domain(&dlb_dev->hw,
-				  handle->domain_id,
-				  cfg,
-				  &response);
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
-			   struct dlb_pending_port_unmaps_args *args)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_pending_port_unmaps(&dlb_dev->hw,
-					 handle->domain_id,
-					 args,
-					 &response);
-
-	*(struct dlb_cmd_response *)args->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_map_qid(struct dlb_hw_dev *handle,
-	       struct dlb_map_qid_args *cfg)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_map_qid(&dlb_dev->hw,
-			     handle->domain_id,
-			     cfg,
-			     &response);
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_unmap_qid(struct dlb_hw_dev *handle,
-		 struct dlb_unmap_qid_args *cfg)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_unmap_qid(&dlb_dev->hw,
-			       handle->domain_id,
-			       cfg,
-			       &response);
-
-	*(struct dlb_cmd_response *)cfg->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_get_ldb_queue_depth(struct dlb_hw_dev *handle,
-			   struct dlb_get_ldb_queue_depth_args *args)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_get_ldb_queue_depth(&dlb_dev->hw,
-					 handle->domain_id,
-					 args,
-					 &response);
-
-	*(struct dlb_cmd_response *)args->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static int
-dlb_pf_get_dir_queue_depth(struct dlb_hw_dev *handle,
-			   struct dlb_get_dir_queue_depth_args *args)
-{
-	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
-	struct dlb_cmd_response response = {0};
-	int ret = 0;
-
-	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
-
-	ret = dlb_hw_get_dir_queue_depth(&dlb_dev->hw,
-					 handle->domain_id,
-					 args,
-					 &response);
-
-	*(struct dlb_cmd_response *)args->response = response;
-
-	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
-
-	return ret;
-}
-
-static void
-dlb_pf_iface_fn_ptrs_init(void)
-{
-	dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
-	dlb_iface_open = dlb_pf_open;
-	dlb_iface_domain_close = dlb_pf_domain_close;
-	dlb_iface_get_device_version = dlb_pf_get_device_version;
-	dlb_iface_get_num_resources = dlb_pf_get_num_resources;
-	dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
-	dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
-	dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
-	dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
-	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
-	dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
-	dlb_iface_dir_port_create = dlb_pf_dir_port_create;
-	dlb_iface_map_qid = dlb_pf_map_qid;
-	dlb_iface_unmap_qid = dlb_pf_unmap_qid;
-	dlb_iface_sched_domain_start = dlb_pf_sched_domain_start;
-	dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
-	dlb_iface_get_ldb_queue_depth = dlb_pf_get_ldb_queue_depth;
-	dlb_iface_get_dir_queue_depth = dlb_pf_get_dir_queue_depth;
-	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
-	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
-	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
-	dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
-
-}
-
-/* PCI DEV HOOKS */
-static int
-dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
-{
-	int ret = 0;
-	struct rte_pci_device *pci_dev;
-	struct dlb_devargs dlb_args = {
-		.socket_id = rte_socket_id(),
-		.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
-		.num_dir_credits_override = -1,
-		.defer_sched = 0,
-		.num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
-	};
-	struct dlb_eventdev *dlb;
-
-	DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
-		    eventdev->data->dev_id, eventdev->data->socket_id);
-
-	dlb_entry_points_init(eventdev);
-
-	dlb_pf_iface_fn_ptrs_init();
-
-	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
-
-		/* Probe the DLB PF layer */
-		dlb->qm_instance.pf_dev = dlb_probe(pci_dev);
-
-		if (dlb->qm_instance.pf_dev == NULL) {
-			DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
-				    rte_errno);
-			ret = -rte_errno;
-			goto dlb_probe_failed;
-		}
-
-		/* Were we invoked with runtime parameters? */
-		if (pci_dev->device.devargs) {
-			ret = dlb_parse_params(pci_dev->device.devargs->args,
-					       pci_dev->device.devargs->name,
-					       &dlb_args);
-			if (ret) {
-				DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
-					    ret, rte_errno);
-				goto dlb_probe_failed;
-			}
-		}
-
-		ret = dlb_primary_eventdev_probe(eventdev,
-						 EVDEV_DLB_NAME_PMD_STR,
-						 &dlb_args);
-	} else {
-		ret = dlb_secondary_eventdev_probe(eventdev,
-						   EVDEV_DLB_NAME_PMD_STR);
-	}
-	if (ret)
-		goto dlb_probe_failed;
-
-	DLB_LOG_INFO("DLB PF Probe success\n");
-
-	return 0;
-
-dlb_probe_failed:
-
-	DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);
-
-	return ret;
-}
-
-#define EVENTDEV_INTEL_VENDOR_ID 0x8086
-
-static const struct rte_pci_id pci_id_dlb_map[] = {
-	{
-		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
-			       DLB_PF_DEV_ID)
-	},
-	{
-		.vendor_id = 0,
-	},
-};
-
-static int
-event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
-		    struct rte_pci_device *pci_dev)
-{
-	return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
-		sizeof(struct dlb_eventdev), dlb_eventdev_pci_init,
-		EVDEV_DLB_NAME_PMD_STR);
-}
-
-static int
-event_dlb_pci_remove(struct rte_pci_device *pci_dev)
-{
-	return rte_event_pmd_pci_remove(pci_dev, NULL);
-}
-
-static struct rte_pci_driver pci_eventdev_dlb_pmd = {
-	.id_table = pci_id_dlb_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
-	.probe = event_dlb_pci_probe,
-	.remove = event_dlb_pci_remove,
-};
-
-RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
-RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);
diff --git a/drivers/event/dlb/rte_pmd_dlb.c b/drivers/event/dlb/rte_pmd_dlb.c
deleted file mode 100644
index 8f56dc306..000000000
--- a/drivers/event/dlb/rte_pmd_dlb.c
+++ /dev/null
@@ -1,38 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
- */
-
-#include "rte_eventdev.h"
-#include "eventdev_pmd.h"
-#include "rte_pmd_dlb.h"
-#include "dlb_priv.h"
-#include "dlb_inline_fns.h"
-
-int
-rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
-			       uint8_t port_id,
-			       enum dlb_token_pop_mode mode)
-{
-	struct dlb_eventdev *dlb;
-	struct rte_eventdev *dev;
-
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
-	dev = &rte_eventdevs[dev_id];
-
-	dlb = dlb_pmd_priv(dev);
-
-	if (mode >= NUM_TOKEN_POP_MODES)
-		return -EINVAL;
-
-	/* The event device must be configured, but not yet started */
-	if (!dlb->configured || dlb->run_state != DLB_RUN_STATE_STOPPED)
-		return -EINVAL;
-
-	/* The token pop mode must be set before configuring the port */
-	if (port_id >= dlb->num_ports || dlb->ev_ports[port_id].setup_done)
-		return -EINVAL;
-
-	dlb->ev_ports[port_id].qm_port.token_pop_mode = mode;
-
-	return 0;
-}
diff --git a/drivers/event/dlb/rte_pmd_dlb.h b/drivers/event/dlb/rte_pmd_dlb.h
deleted file mode 100644
index 9cf6dd338..000000000
--- a/drivers/event/dlb/rte_pmd_dlb.h
+++ /dev/null
@@ -1,77 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2020 Intel Corporation
- */
-
-/*!
- *  @file      rte_pmd_dlb.h
- *
- *  @brief     DLB PMD-specific functions
- *
- */
-
-#ifndef _RTE_PMD_DLB_H_
-#define _RTE_PMD_DLB_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
- * Selects the token pop mode for an DLB port.
- */
-enum dlb_token_pop_mode {
-	/* Pop the CQ tokens immediately after dequeueing. */
-	AUTO_POP,
-	/* Pop CQ tokens after (dequeue_depth - 1) events are released.
-	 * Supported on load-balanced ports only.
-	 */
-	DELAYED_POP,
-	/* Pop the CQ tokens during next dequeue operation. */
-	DEFERRED_POP,
-
-	/* NUM_TOKEN_POP_MODES must be last */
-	NUM_TOKEN_POP_MODES
-};
-
-/*!
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
- * Configure the token pop mode for an DLB port. By default, all ports use
- * AUTO_POP. This function must be called before calling rte_event_port_setup()
- * for the port, but after calling rte_event_dev_configure().
- *
- * @note
- *    The defer_sched vdev arg, which configures all load-balanced ports with
- *    dequeue_depth == 1 for DEFERRED_POP mode, takes precedence over this
- *    function.
- *
- * @param dev_id
- *    The identifier of the event device.
- * @param port_id
- *    The identifier of the event port.
- * @param mode
- *    The token pop mode.
- *
- * @return
- * - 0: Success
- * - EINVAL: Invalid dev_id, port_id, or mode
- * - EINVAL: The DLB is not configured, is already running, or the port is
- *   already setup
- */
-
-__rte_experimental
-int
-rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
-			       uint8_t port_id,
-			       enum dlb_token_pop_mode mode);
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PMD_DLB_H_ */
diff --git a/drivers/event/dlb/version.map b/drivers/event/dlb/version.map
deleted file mode 100644
index 3338a22c1..000000000
--- a/drivers/event/dlb/version.map
+++ /dev/null
@@ -1,9 +0,0 @@ 
-DPDK_21 {
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	rte_pmd_dlb_set_token_pop_mode;
-};
diff --git a/drivers/event/meson.build b/drivers/event/meson.build
index a49288a5d..b7f9bf7c6 100644
--- a/drivers/event/meson.build
+++ b/drivers/event/meson.build
@@ -5,7 +5,7 @@  if is_windows
 	subdir_done()
 endif
 
-drivers = ['dlb', 'dlb2', 'dpaa', 'dpaa2', 'octeontx2', 'opdl', 'skeleton', 'sw',
+drivers = ['dlb2', 'dpaa', 'dpaa2', 'octeontx2', 'opdl', 'skeleton', 'sw',
 	   'dsw']
 if not (toolchain == 'gcc' and cc.version().version_compare('<4.8.6') and
 	dpdk_conf.has('RTE_ARCH_ARM64'))