[dpdk-dev,v3,01/20] thunderx/nicvf/base: add hardware API for ThunderX nicvf inbuilt NIC

Message ID 1465317632-11471-2-git-send-email-jerin.jacob@caviumnetworks.com (mailing list archive)
State Superseded, archived
Delegated to: Bruce Richardson

Commit Message

Jerin Jacob June 7, 2016, 4:40 p.m. UTC
  Adds the hardware-specific API for the ThunderX nicvf inbuilt NIC device
under the drivers/net/thunderx/base directory.

Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
Signed-off-by: Kamil Rytarowski <Kamil.Rytarowski@caviumnetworks.com>
Signed-off-by: Zyta Szpak <zyta.szpak@semihalf.com>
Signed-off-by: Slawomir Rosek <slawomir.rosek@semihalf.com>
Signed-off-by: Radoslaw Biernacki <rad@semihalf.com>
---
 drivers/net/thunderx/base/nicvf_hw.c      |  908 +++++++++++++++++++++
 drivers/net/thunderx/base/nicvf_hw.h      |  240 ++++++
 drivers/net/thunderx/base/nicvf_hw_defs.h | 1216 +++++++++++++++++++++++++++++
 drivers/net/thunderx/base/nicvf_mbox.c    |  416 ++++++++++
 drivers/net/thunderx/base/nicvf_mbox.h    |  232 ++++++
 drivers/net/thunderx/base/nicvf_plat.h    |  132 ++++
 6 files changed, 3144 insertions(+)
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.c
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.h
 create mode 100644 drivers/net/thunderx/base/nicvf_hw_defs.h
 create mode 100644 drivers/net/thunderx/base/nicvf_mbox.c
 create mode 100644 drivers/net/thunderx/base/nicvf_mbox.h
 create mode 100644 drivers/net/thunderx/base/nicvf_plat.h
  

Comments

Ferruh Yigit June 8, 2016, 12:18 p.m. UTC | #1
On 6/7/2016 5:40 PM, Jerin Jacob wrote:
> Adds the hardware-specific API for the ThunderX nicvf inbuilt NIC device
> under the drivers/net/thunderx/base directory.
> 
> Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
> Signed-off-by: Kamil Rytarowski <Kamil.Rytarowski@caviumnetworks.com>
> Signed-off-by: Zyta Szpak <zyta.szpak@semihalf.com>
> Signed-off-by: Slawomir Rosek <slawomir.rosek@semihalf.com>
> Signed-off-by: Radoslaw Biernacki <rad@semihalf.com>
> ---

...

> +
> +struct pf_rq_cfg { union { struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t reserverd1:1;
Doesn't really matter but, as a detail, s/reserverd/reserved/ ? A few
more occurrences below.
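
I.e., presumably these were meant to read:

	uint64_t reserved1:1;
	uint64_t reserved0:34;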

> +	uint64_t reserverd0:34;
> +	uint64_t strip_pre_l2:1;
> +	uint64_t caching:2;
> +	uint64_t cq_qs:7;
> +	uint64_t cq_idx:3;
> +	uint64_t rbdr_cont_qs:7;
> +	uint64_t rbdr_cont_idx:1;
> +	uint64_t rbdr_strt_qs:7;
> +	uint64_t rbdr_strt_idx:1;
> +#else
> +	uint64_t rbdr_strt_idx:1;
> +	uint64_t rbdr_strt_qs:7;
> +	uint64_t rbdr_cont_idx:1;
> +	uint64_t rbdr_cont_qs:7;
> +	uint64_t cq_idx:3;
> +	uint64_t cq_qs:7;
> +	uint64_t caching:2;
> +	uint64_t strip_pre_l2:1;
> +	uint64_t reserverd0:34;
> +	uint64_t reserverd1:1;
> +#endif
> +	};
> +	uint64_t value;
> +}; };
> +

...
  
Ferruh Yigit June 8, 2016, 3:45 p.m. UTC | #2
On 6/7/2016 5:40 PM, Jerin Jacob wrote:
> Adds the hardware-specific API for the ThunderX nicvf inbuilt NIC device
> under the drivers/net/thunderx/base directory.
> 
> Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
> Signed-off-by: Kamil Rytarowski <Kamil.Rytarowski@caviumnetworks.com>
> Signed-off-by: Zyta Szpak <zyta.szpak@semihalf.com>
> Signed-off-by: Slawomir Rosek <slawomir.rosek@semihalf.com>
> Signed-off-by: Radoslaw Biernacki <rad@semihalf.com>
> ---
>  drivers/net/thunderx/base/nicvf_hw.c      |  908 +++++++++++++++++++++
>  drivers/net/thunderx/base/nicvf_hw.h      |  240 ++++++
>  drivers/net/thunderx/base/nicvf_hw_defs.h | 1216 +++++++++++++++++++++++++++++
>  drivers/net/thunderx/base/nicvf_mbox.c    |  416 ++++++++++
>  drivers/net/thunderx/base/nicvf_mbox.h    |  232 ++++++
>  drivers/net/thunderx/base/nicvf_plat.h    |  132 ++++
>  6 files changed, 3144 insertions(+)
>  create mode 100644 drivers/net/thunderx/base/nicvf_hw.c
>  create mode 100644 drivers/net/thunderx/base/nicvf_hw.h
>  create mode 100644 drivers/net/thunderx/base/nicvf_hw_defs.h
>  create mode 100644 drivers/net/thunderx/base/nicvf_mbox.c
>  create mode 100644 drivers/net/thunderx/base/nicvf_mbox.h
>  create mode 100644 drivers/net/thunderx/base/nicvf_plat.h
> 
> diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
> new file mode 100644
> index 0000000..24fe77d
> --- /dev/null
> +++ b/drivers/net/thunderx/base/nicvf_hw.c
> @@ -0,0 +1,908 @@
> +/*
> + *   BSD LICENSE
> + *
> + *   Copyright (C) Cavium networks Ltd. 2016.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Cavium networks nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <unistd.h>
> +#include <math.h>
> +#include <errno.h>
> +#include <stdarg.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <assert.h>
> +
> +#include "nicvf_plat.h"
> +
> +struct nicvf_reg_info {
> +	uint32_t offset;
> +	const char *name;
> +};
> +
> +#define NICVF_REG_INFO(reg) {reg, #reg}
> +
> +static const struct nicvf_reg_info nicvf_reg_tbl[] = {
> +	NICVF_REG_INFO(NIC_VF_CFG),
> +	NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
> +	NICVF_REG_INFO(NIC_VF_INT),
> +	NICVF_REG_INFO(NIC_VF_INT_W1S),
> +	NICVF_REG_INFO(NIC_VF_ENA_W1C),
> +	NICVF_REG_INFO(NIC_VF_ENA_W1S),
> +	NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
> +	NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
> +};
> +
> +static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
> +	{NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
> +	{NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
> +	{NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
> +	{NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
> +	{NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
> +	{NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
> +	{NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
> +	{NIC_VNIC_TX_STAT_0_4 + 16,  "NIC_VNIC_STAT_TX_BCAST"},
> +	{NIC_VNIC_TX_STAT_0_4 + 24,  "NIC_VNIC_STAT_TX_MCAST"},
> +	{NIC_VNIC_TX_STAT_0_4 + 32,  "NIC_VNIC_STAT_TX_DROP"},
> +	{NIC_VNIC_RX_STAT_0_13 + 0,  "NIC_VNIC_STAT_RX_OCTS"},
> +	{NIC_VNIC_RX_STAT_0_13 + 8,  "NIC_VNIC_STAT_RX_UCAST"},
> +	{NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
> +	{NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
> +	{NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
> +	{NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
> +	{NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
> +	{NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
> +	{NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
> +	{NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
> +	{NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
> +	{NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
> +	{NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
> +	{NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
> +};
> +
> +static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
> +	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
> +};
> +
> +static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
> +	NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
> +	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
> +	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
> +};
> +
> +static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
> +	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
> +};
> +
> +static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
> +	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
> +};
> +
> +int
> +nicvf_base_init(struct nicvf *nic)
> +{
> +	nic->hwcap = 0;
> +	if (nic->subsystem_device_id == 0)
> +		return NICVF_ERR_BASE_INIT;
> +
> +	if (nicvf_hw_version(nic) == NICVF_PASS2)
> +		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;
> +
> +	return NICVF_OK;
> +}
> +
> +/* dump on stdout if data is NULL */
> +int
> +nicvf_reg_dump(struct nicvf *nic,  uint64_t *data)
> +{
> +	uint32_t i, q;
> +	bool dump_stdout;
> +
> +	dump_stdout = data ? 0 : 1;
> +
> +	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
> +		if (dump_stdout)
> +			nicvf_log("%24s  = 0x%" PRIx64 "\n",
> +				nicvf_reg_tbl[i].name,
> +				nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
> +		else
> +			*data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
> +
> +	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
> +		if (dump_stdout)
> +			nicvf_log("%24s  = 0x%" PRIx64 "\n",
> +				nicvf_multi_reg_tbl[i].name,
> +				nicvf_reg_read(nic,
> +					nicvf_multi_reg_tbl[i].offset));
> +		else
> +			*data++ = nicvf_reg_read(nic,
> +					nicvf_multi_reg_tbl[i].offset);
> +
> +	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
> +		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
> +			if (dump_stdout)
> +				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
> +					nicvf_qset_cq_reg_tbl[i].name, q,
> +					nicvf_queue_reg_read(nic,
> +					nicvf_qset_cq_reg_tbl[i].offset, q));
> +			else
> +				*data++ = nicvf_queue_reg_read(nic,
> +					nicvf_qset_cq_reg_tbl[i].offset, q);
> +
> +	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
> +		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
> +			if (dump_stdout)
> +				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
> +					nicvf_qset_rq_reg_tbl[i].name, q,
> +					nicvf_queue_reg_read(nic,
> +					nicvf_qset_rq_reg_tbl[i].offset, q));
> +			else
> +				*data++ = nicvf_queue_reg_read(nic,
> +					nicvf_qset_rq_reg_tbl[i].offset, q);
> +
> +	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
> +		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
> +			if (dump_stdout)
> +				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
> +					nicvf_qset_sq_reg_tbl[i].name, q,
> +					nicvf_queue_reg_read(nic,
> +					nicvf_qset_sq_reg_tbl[i].offset, q));
> +			else
> +				*data++ = nicvf_queue_reg_read(nic,
> +					nicvf_qset_sq_reg_tbl[i].offset, q);
> +
> +	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
> +		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
> +			if (dump_stdout)
> +				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
> +					nicvf_qset_rbdr_reg_tbl[i].name, q,
> +					nicvf_queue_reg_read(nic,
> +					nicvf_qset_rbdr_reg_tbl[i].offset, q));
> +			else
> +				*data++ = nicvf_queue_reg_read(nic,
> +					nicvf_qset_rbdr_reg_tbl[i].offset, q);
> +	return 0;
> +}
> +
> +int
> +nicvf_reg_get_count(void)
> +{
> +	int nr_regs;
> +
> +	nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
> +	nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
> +	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
> +			MAX_CMP_QUEUES_PER_QS;
> +	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
> +			MAX_RCV_QUEUES_PER_QS;
> +	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
> +			MAX_SND_QUEUES_PER_QS;
> +	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
> +			MAX_RCV_BUF_DESC_RINGS_PER_QS;
> +
> +	return nr_regs;
> +}
> +
> +static int
> +nicvf_qset_config_internal(struct nicvf *nic, bool enable)
> +{
> +	int ret;
> +	struct pf_qs_cfg pf_qs_cfg = {.value = 0};
> +
> +	pf_qs_cfg.ena = enable ? 1 : 0;
> +	pf_qs_cfg.vnic = nic->vf_id;
> +	ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
> +	return ret ? NICVF_ERR_SET_QS : 0;
> +}
> +
> +/* Requests PF to assign and enable Qset */
> +int
> +nicvf_qset_config(struct nicvf *nic)
> +{
> +	/* Enable Qset */
> +	return nicvf_qset_config_internal(nic, true);
> +}
> +
> +int
> +nicvf_qset_reclaim(struct nicvf *nic)
> +{
> +	/* Disable Qset */
> +	return nicvf_qset_config_internal(nic, false);
> +}
> +
> +static int
> +cmpfunc(const void *a, const void *b)
> +{
> +	return (*(const uint32_t *)a - *(const uint32_t *)b);
> +}
> +
> +static uint32_t
> +nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
> +{
> +	uint32_t i;
> +
> +	qsort(list, entries, sizeof(uint32_t), cmpfunc);
> +	for (i = 0; i < entries; i++)
> +		if (val <= list[i])
> +			break;
> +	/* Not in the list */
> +	if (i >= entries)
> +		return 0;
> +	else
> +		return list[i];
> +}
> +
> +static void
> +nicvf_handle_qset_err_intr(struct nicvf *nic)
> +{
> +	uint16_t qidx;
> +	uint64_t status;
> +
> +	nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
> +	nicvf_reg_dump(nic, NULL);
> +
> +	for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
> +		status = nicvf_queue_reg_read(
> +				nic, NIC_QSET_CQ_0_7_STATUS, qidx);
> +		if (!(status & NICVF_CQ_ERR_MASK))
> +			continue;
> +
> +		if (status & NICVF_CQ_WR_FULL)
> +			nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
> +		if (status & NICVF_CQ_WR_DISABLE)
> +			nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
> +		if (status & NICVF_CQ_WR_FAULT)
> +			nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
> +		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
> +	}
> +
> +	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
> +		status = nicvf_queue_reg_read(
> +				nic, NIC_QSET_SQ_0_7_STATUS, qidx);
> +		if (!(status & NICVF_SQ_ERR_MASK))
> +			continue;
> +
> +		if (status & NICVF_SQ_ERR_STOPPED)
> +			nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
> +		if (status & NICVF_SQ_ERR_SEND)
> +			nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
> +		if (status & NICVF_SQ_ERR_DPE)
> +			nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
> +		nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
> +	}
> +
> +	for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
> +		status = nicvf_queue_reg_read(nic,
> +					NIC_QSET_RBDR_0_1_STATUS0, qidx);
Extra tab?

> +		status &= NICVF_RBDR_FIFO_STATE_MASK;
> +		status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
> +
> +		if (status == RBDR_FIFO_STATE_FAIL)
> +			nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
> +		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
> +	}
> +
> +	nicvf_disable_all_interrupts(nic);
> +	abort();
> +}
> +
> +/*
> + * Handle poll mode driver interested "mbox" and "queue-set error" interrupts.
> + * This function is not re-entrant.
> + * The caller should provide proper serialization.
> + */
> +int
> +nicvf_reg_poll_interrupts(struct nicvf *nic)
> +{
> +	int msg = 0;
> +	uint64_t intr;
> +
> +	intr = nicvf_reg_read(nic, NIC_VF_INT);
> +	if (intr & NICVF_INTR_MBOX_MASK) {
> +		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
> +		msg = nicvf_handle_mbx_intr(nic);
> +	}
> +	if (intr & NICVF_INTR_QS_ERR_MASK) {
> +		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
> +		nicvf_handle_qset_err_intr(nic);
> +	}
> +	return msg;
> +}
> +
> +static int
> +nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
> +		    uint32_t bit_pos, uint32_t bits, uint64_t val)
> +{
> +	uint64_t bit_mask;
> +	uint64_t reg_val;
> +	int timeout = 10;
Does it make sense to convert the hardcoded value into a macro?
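
Something like this, perhaps (macro names are only a suggestion); the
same macros would also cover the nicvf_delay_us(2000) calls further down:

#define NICVF_REG_POLL_TIMEOUT_CNT  10    /* max poll iterations */
#define NICVF_REG_POLL_DELAY_US     2000  /* delay between polls, in us */

static int
nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
		    uint32_t bit_pos, uint32_t bits, uint64_t val)
{
	uint64_t bit_mask = ((1ULL << bits) - 1) << bit_pos;
	int timeout = NICVF_REG_POLL_TIMEOUT_CNT;

	while (timeout--) {
		if (((nicvf_queue_reg_read(nic, offset, qidx) & bit_mask)
				>> bit_pos) == val)
			return NICVF_OK;
		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
	}
	return NICVF_ERR_REG_POLL;
}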

> +
> +	bit_mask = (1ULL << bits) - 1;
> +	bit_mask = (bit_mask << bit_pos);
> +
> +	while (timeout) {
> +		reg_val = nicvf_queue_reg_read(nic, offset, qidx);
> +		if (((reg_val & bit_mask) >> bit_pos) == val)
> +			return NICVF_OK;
> +		nicvf_delay_us(2000);
hardcoded value

> +		timeout--;
> +	}
> +	return NICVF_ERR_REG_POLL;
> +}
> +
> +int
> +nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
> +{
> +	uint64_t status;
> +	int timeout = 10;
hardcoded value

> +	struct nicvf_rbdr *rbdr = nic->rbdr;
> +
> +	/* Save head and tail pointers for freeing up buffers */
> +	if (rbdr) {
> +		rbdr->head = nicvf_queue_reg_read(nic,
> +					NIC_QSET_RBDR_0_1_HEAD,
> +					qidx) >> 3;
Extra tabs; there are more usages like this below, I won't flag each one.

> +		rbdr->tail = nicvf_queue_reg_read(nic,
> +					NIC_QSET_RBDR_0_1_TAIL,
> +					qidx) >> 3;
> +		rbdr->next_tail = rbdr->tail;
> +	}
> +
> +	/* Reset RBDR */
> +	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
> +				NICVF_RBDR_RESET);
> +
> +	/* Disable RBDR */
> +	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
> +	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
> +				62, 2, 0x00))
> +		return NICVF_ERR_RBDR_DISABLE;
> +
> +	while (1) {
> +		status = nicvf_queue_reg_read(nic,
> +				NIC_QSET_RBDR_0_1_PRFCH_STATUS,	qidx);
> +		if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
> +			break;
> +		nicvf_delay_us(2000);
hardcoded sleep value

> +		timeout--;
> +		if (!timeout)
> +			return NICVF_ERR_RBDR_PREFETCH;
> +	}
> +
> +	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
> +			NICVF_RBDR_RESET);
> +	if (nicvf_qset_poll_reg(nic, qidx,
> +				NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
> +		return NICVF_ERR_RBDR_RESET1;
> +
> +	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
> +	if (nicvf_qset_poll_reg(nic, qidx,
> +				NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
> +		return NICVF_ERR_RBDR_RESET2;
> +
> +	return NICVF_OK;
> +}
> +
> +static int
> +nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
> +{
> +	int val;
> +
> +	val = ((uint32_t)log2(len) - len_shift);
> +	assert(val >= 0);
> +	assert(val <= 6);
hardcoded values for assertion
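
For instance (names are only a suggestion):

#define NICVF_QSIZE_REGBIT_MIN  0
#define NICVF_QSIZE_REGBIT_MAX  6

	assert(val >= NICVF_QSIZE_REGBIT_MIN);
	assert(val <= NICVF_QSIZE_REGBIT_MAX);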

> +	return val;
> +}
> +
> +int
> +nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
> +{
> +	int ret;
> +	uint64_t head, tail;
> +	struct nicvf_rbdr *rbdr = nic->rbdr;
> +	struct rbdr_cfg rbdr_cfg = {.value = 0};
> +
> +	ret = nicvf_qset_rbdr_reclaim(nic, qidx);
> +	if (ret)
> +		return ret;
> +
> +	/* Set descriptor base address */
> +	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
> +
> +	/* Enable RBDR  & set queue size */
> +	rbdr_cfg.reserved_45_63 = 0,

Was ";" intended here instead of ","?

> +	rbdr_cfg.ena = 1;
> +	rbdr_cfg.reset = 0;
> +	rbdr_cfg.ldwb = 0;
> +	rbdr_cfg.reserved_36_41 = 0;

No need for these 0 assignments; the initializer in the declaration
already zeroes the struct.
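
I.e., with the {.value = 0} initializer above, only the non-zero fields
need to be touched:

	struct rbdr_cfg rbdr_cfg = {.value = 0};

	rbdr_cfg.ena = 1;
	rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
					RBDR_SIZE_SHIFT);
	rbdr_cfg.lines = rbdr->buffsz / 128;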

> +	rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
> +					RBDR_SIZE_SHIFT);
> +	rbdr_cfg.reserved_25_31 = 0;
> +	rbdr_cfg.avg_con = 0;
> +	rbdr_cfg.reserved_12_15 = 0;
> +	rbdr_cfg.lines = rbdr->buffsz / 128;
> +
> +	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
> +
> +	/* Verify proper RBDR reset */
> +	head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
> +	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
> +
> +	if (head | tail)
> +		return NICVF_ERR_RBDR_RESET;
> +
> +	return NICVF_OK;
> +}
> +
> +uint32_t
> +nicvf_qsize_rbdr_roundup(uint32_t val)
> +{
> +	uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
> +				RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
> +				RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
> +				RBDR_QUEUE_SZ_512K};
> +	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
> +}
> +
> +int
> +nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
> +			  rbdr_pool_get_handler handler,
> +			  void *opaque, uint32_t max_buffs)
> +{
> +	struct rbdr_entry_t *desc, *desc0;
> +	struct nicvf_rbdr *rbdr = nic->rbdr;
> +	uint32_t count;
> +	nicvf_phys_addr_t phy;
> +
> +	assert(rbdr != NULL);
> +	desc = rbdr->desc;
> +	count = 0;
> +	/* Don't fill beyond max numbers of desc */
> +	while (count < (rbdr->qlen_mask)) {
Extra parentheses.

> +		if (count >= max_buffs)
> +			break;
> +		desc0 = desc + count;
> +		phy = handler(opaque);
> +		if (phy) {
> +			desc0->full_addr = phy;
> +			count++;
> +		} else {
> +			break;
> +		}
> +	}
> +	nicvf_smp_wmb();
> +	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
> +	rbdr->tail = nicvf_queue_reg_read(nic,
> +				NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
> +	rbdr->next_tail = rbdr->tail;
> +	nicvf_smp_rmb();
> +	return 0;
> +}
> +
> +int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
The return type should be on the line above.
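
I.e., to match the style used in the rest of the file:

int
nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
{
	return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
}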

> +{
> +	return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
> +}
> +
> +int
> +nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
> +{
> +	uint64_t head, tail;
> +	struct sq_cfg sq_cfg;
> +
> +	sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
> +
> +	/* Disable send queue */
> +	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
> +
> +	/* Check if SQ is stopped */
> +	if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
> +				NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
> +		return NICVF_ERR_SQ_DISABLE;
> +
> +	/* Reset send queue */
> +	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
> +	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
> +	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
> +	if (head | tail)
> +		return  NICVF_ERR_SQ_RESET;
> +
> +	return 0;
> +}
> +
> +int
> +nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
> +{
> +	int ret;
> +	struct sq_cfg sq_cfg = {.value = 0};
> +
> +	ret = nicvf_qset_sq_reclaim(nic, qidx);
> +	if (ret)
> +		return ret;
> +
> +	/* Send a mailbox msg to PF to config SQ */
> +	if (nicvf_mbox_sq_config(nic, qidx))
> +		return  NICVF_ERR_SQ_PF_CFG;
> +
> +	/* Set queue base address */
> +	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
> +
> +	/* Enable send queue  & set queue size */
> +	sq_cfg.ena = 1;
> +	sq_cfg.reset = 0;
> +	sq_cfg.ldwb = 0;
> +	sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
> +	sq_cfg.tstmp_bgx_intf = 0;
> +	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
> +
> +	/* Ring doorbell so that H/W restarts processing SQEs */
> +	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
> +
> +	return 0;
> +}
> +
> +uint32_t
> +nicvf_qsize_sq_roundup(uint32_t val)
> +{
> +	uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
> +				SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
> +				SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
> +				SND_QUEUE_SZ_64K};
> +	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
> +}
> +
> +int
> +nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
> +{
> +	/* Disable receive queue */
> +	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
> +	return nicvf_mbox_rq_sync(nic);
> +}
> +
> +int
> +nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
> +{
> +	struct pf_rq_cfg pf_rq_cfg = {.value = 0};
> +	struct rq_cfg rq_cfg = {.value = 0};
> +
> +	if (nicvf_qset_rq_reclaim(nic, qidx))
> +		return NICVF_ERR_RQ_CLAIM;
> +
> +	pf_rq_cfg.strip_pre_l2 = 0;
> +	/* First cache line of RBDR data will be allocated into L2C */
> +	pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
> +	pf_rq_cfg.cq_qs = nic->vf_id;
> +	pf_rq_cfg.cq_idx = qidx;
> +	pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
> +	pf_rq_cfg.rbdr_cont_idx = 0;
> +	pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
> +	pf_rq_cfg.rbdr_strt_idx = 0;
> +
> +	/* Send a mailbox msg to PF to config RQ */
> +	if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
> +		return NICVF_ERR_RQ_PF_CFG;
> +
> +	/* Select Rx backpressure */
> +	if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
> +		return NICVF_ERR_RQ_BP_CFG;
> +
> +	/* Send a mailbox msg to PF to config RQ drop */
> +	if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
> +		return NICVF_ERR_RQ_DROP_CFG;
> +
> +	/* Enable Receive queue */
> +	rq_cfg.ena = 1;
> +	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
> +
> +	return 0;
> +}
> +
> +int
> +nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
> +{
> +	uint64_t tail, head;
> +
> +	/* Disable completion queue */
> +	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
> +	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
> +		return NICVF_ERR_CQ_DISABLE;
> +
> +	/* Reset completion queue */
> +	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
> +	tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
> +	head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
> +	if (head | tail)
> +		return  NICVF_ERR_CQ_RESET;
> +
> +	/* Disable timer threshold (doesn't get reset upon CQ reset) */
> +	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
> +	return 0;
> +}
> +
> +int
> +nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
> +{
> +	int ret;
> +	struct cq_cfg cq_cfg = {.value = 0};
> +
> +	ret = nicvf_qset_cq_reclaim(nic, qidx);
> +	if (ret)
> +		return ret;
> +
> +	/* Set completion queue base address */
> +	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
> +
> +	cq_cfg.ena = 1;
> +	cq_cfg.reset = 0;
> +	/* Writes of CQE will be allocated into L2C */
> +	cq_cfg.caching = 1;
> +	cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
> +	cq_cfg.avg_con = 0;
> +	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
> +
> +	/* Set threshold value for interrupt generation */
> +	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
> +	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
> +	return 0;
> +}
> +
> +uint32_t
> +nicvf_qsize_cq_roundup(uint32_t val)
> +{
> +	uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
> +				CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
> +				CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
> +				CMP_QUEUE_SZ_64K};
> +	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
> +}
> +
> +
> +void
> +nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
> +{
> +	uint64_t val;
> +
> +	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
> +	if (enable)
> +		val |= (STRIP_FIRST_VLAN << 25);
> +	else
> +		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
> +
> +	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
> +}
> +
> +void
> +nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
> +{
> +	int idx;
> +	uint64_t addr, val;
> +	uint64_t *keyptr = (uint64_t *)key;
> +
> +	addr = NIC_VNIC_RSS_KEY_0_4;
> +	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
> +		val = nicvf_cpu_to_be_64(*keyptr);
> +		nicvf_reg_write(nic, addr, val);
> +		addr += sizeof(uint64_t);
> +		keyptr++;
> +	}
> +}
> +
> +void
> +nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
> +{
> +	int idx;
> +	uint64_t addr, val;
> +	uint64_t *keyptr = (uint64_t *)key;
> +
> +	addr = NIC_VNIC_RSS_KEY_0_4;
> +	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
> +		val = nicvf_reg_read(nic, addr);
> +		*keyptr = nicvf_be_to_cpu_64(val);
> +		addr += sizeof(uint64_t);
> +		keyptr++;
> +	}
> +}
> +
> +void
> +nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
> +{
> +	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
> +}
> +
> +uint64_t
> +nicvf_rss_get_cfg(struct nicvf *nic)
> +{
> +	return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
> +}
> +
> +int
> +nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
> +{
> +	uint32_t idx;
> +	struct nicvf_rss_reta_info *rss = &nic->rss_info;
> +
> +	/* result will be stored in nic->rss_info.rss_size */
> +	if (nicvf_mbox_get_rss_size(nic))
> +		return NICVF_ERR_RSS_GET_SZ;
> +
> +	assert(rss->rss_size > 0);
> +	rss->hash_bits = (uint8_t)log2(rss->rss_size);
> +	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
> +		rss->ind_tbl[idx] = tbl[idx];
> +
> +	if (nicvf_mbox_config_rss(nic))
> +		return NICVF_ERR_RSS_TBL_UPDATE;
> +
> +	return NICVF_OK;
> +}
> +
> +int
> +nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
> +{
> +	uint32_t idx;
> +	struct nicvf_rss_reta_info *rss = &nic->rss_info;
> +
> +	/* result will be stored in nic->rss_info.rss_size */
> +	if (nicvf_mbox_get_rss_size(nic))
> +		return NICVF_ERR_RSS_GET_SZ;
> +
> +	assert(rss->rss_size > 0);
> +	rss->hash_bits = (uint8_t)log2(rss->rss_size);
> +	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
> +		tbl[idx] = rss->ind_tbl[idx];
> +
> +	return NICVF_OK;
> +}
> +
> +int
> +nicvf_rss_config(struct nicvf *nic, uint32_t  qcnt, uint64_t cfg)
> +{
> +	uint32_t idx;
> +	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
> +	uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
> +		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
> +		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
> +		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
> +		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
> +		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
> +	};
> +
> +	if (nic->cpi_alg != CPI_ALG_NONE)
> +		return -EINVAL;
> +
> +	if (cfg == 0)
> +		return -EINVAL;
> +
> +	/* Update default RSS key and cfg */
> +	nicvf_rss_set_key(nic, default_key);
> +	nicvf_rss_set_cfg(nic, cfg);
> +
> +	/* Update default RSS RETA */
> +	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
> +		default_reta[idx] = idx % qcnt;
> +
> +	return nicvf_rss_reta_update(nic, default_reta,
> +				NIC_MAX_RSS_IDR_TBL_SIZE);
> +}
> +
> +int
> +nicvf_rss_term(struct nicvf *nic)
> +{
> +	uint32_t idx;
> +	uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
> +
> +	nicvf_rss_set_cfg(nic, 0);
> +	/* Redirect the output to 0th queue  */
> +	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
> +		disable_rss[idx] = 0;
> +
> +	return nicvf_rss_reta_update(nic, disable_rss,
> +				NIC_MAX_RSS_IDR_TBL_SIZE);
> +}
> +
> +int
> +nicvf_loopback_config(struct nicvf *nic, bool enable)
> +{
> +	if (enable && nic->loopback_supported == 0)
> +		return NICVF_ERR_LOOPBACK_CFG;
> +
> +	return nicvf_mbox_loopback_config(nic, enable);
> +}
> +
> +void
> +nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
> +{
> +	stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
> +	stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
> +	stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
> +	stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
> +	stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
> +	stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
> +	stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
> +	stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
> +	stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
> +	stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
> +	stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
> +	stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
> +	stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
> +	stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
> +
> +	stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
> +	stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
> +	stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
> +	stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
> +	stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
> +}
> +
> +void
> +nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
> +		       uint16_t qidx)
> +{
> +	qstats->q_rx_bytes =
> +		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
> +	qstats->q_rx_packets =
> +		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
> +}
> +
> +void
> +nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
> +		       uint16_t qidx)
> +{
> +	qstats->q_tx_bytes =
> +		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
> +	qstats->q_tx_packets =
> +		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
> +}
> diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
> new file mode 100644
> index 0000000..32357cc
> --- /dev/null
> +++ b/drivers/net/thunderx/base/nicvf_hw.h
> @@ -0,0 +1,240 @@
> +/*
> + *   BSD LICENSE
> + *
> + *   Copyright (C) Cavium networks Ltd. 2016.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Cavium networks nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#ifndef _THUNDERX_NICVF_HW_H
> +#define _THUNDERX_NICVF_HW_H
> +
> +#include <stdint.h>
> +
> +#include "nicvf_hw_defs.h"
> +
> +#define	PCI_VENDOR_ID_CAVIUM			0x177D
> +#define	PCI_DEVICE_ID_THUNDERX_PASS1_NICVF	0x0011
> +#define	PCI_DEVICE_ID_THUNDERX_PASS2_NICVF	0xA034
> +#define	PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF	0xA11E
> +#define	PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF	0xA134
> +
> +#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
> +
> +#define NICVF_GET_RX_STATS(reg) \
> +	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
> +#define NICVF_GET_TX_STATS(reg) \
> +	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
> +
> +#define NICVF_PASS1	(PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF)
> +#define NICVF_PASS2	(PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF)
> +
> +#define NICVF_CAP_TUNNEL_PARSING          (1ULL << 0)
> +
> +enum nicvf_tns_mode {
> +	NIC_TNS_BYPASS_MODE = 0,
unnecessary assignment
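
I.e., this is equivalent, since the first enumerator defaults to 0:

enum nicvf_tns_mode {
	NIC_TNS_BYPASS_MODE,
	NIC_TNS_MODE,
};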

> +	NIC_TNS_MODE,
> +};
> +
> +enum nicvf_err_e {
> +	NICVF_OK = 0,
unnecessary assignment

> +	NICVF_ERR_SET_QS = -8191,/* -8191 */
> +	NICVF_ERR_RESET_QS,      /* -8190 */
> +	NICVF_ERR_REG_POLL,      /* -8189 */
> +	NICVF_ERR_RBDR_RESET,    /* -8188 */
> +	NICVF_ERR_RBDR_DISABLE,  /* -8187 */
> +	NICVF_ERR_RBDR_PREFETCH, /* -8186 */
> +	NICVF_ERR_RBDR_RESET1,   /* -8185 */
> +	NICVF_ERR_RBDR_RESET2,   /* -8184 */
> +	NICVF_ERR_RQ_CLAIM,      /* -8183 */
> +	NICVF_ERR_RQ_PF_CFG,	 /* -8182 */
> +	NICVF_ERR_RQ_BP_CFG,	 /* -8181 */
> +	NICVF_ERR_RQ_DROP_CFG,	 /* -8180 */
> +	NICVF_ERR_CQ_DISABLE,	 /* -8179 */
> +	NICVF_ERR_CQ_RESET,	 /* -8178 */
> +	NICVF_ERR_SQ_DISABLE,	 /* -8177 */
> +	NICVF_ERR_SQ_RESET,	 /* -8176 */
> +	NICVF_ERR_SQ_PF_CFG,	 /* -8175 */
> +	NICVF_ERR_RSS_TBL_UPDATE,/* -8174 */
> +	NICVF_ERR_RSS_GET_SZ,    /* -8173 */
> +	NICVF_ERR_BASE_INIT,     /* -8172 */
> +	NICVF_ERR_LOOPBACK_CFG,  /* -8171 */
> +};
> +
> +typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *opaque);
> +
> +struct nicvf_hw_rx_qstats {
> +	uint64_t q_rx_bytes;
> +	uint64_t q_rx_packets;
> +};
> +
> +struct nicvf_hw_tx_qstats {
> +	uint64_t q_tx_bytes;
> +	uint64_t q_tx_packets;
> +};
> +
> +struct nicvf_hw_stats {
> +	uint64_t rx_bytes;
> +	uint64_t rx_ucast_frames;
> +	uint64_t rx_bcast_frames;
> +	uint64_t rx_mcast_frames;
> +	uint64_t rx_fcs_errors;
> +	uint64_t rx_l2_errors;
> +	uint64_t rx_drop_red;
> +	uint64_t rx_drop_red_bytes;
> +	uint64_t rx_drop_overrun;
> +	uint64_t rx_drop_overrun_bytes;
> +	uint64_t rx_drop_bcast;
> +	uint64_t rx_drop_mcast;
> +	uint64_t rx_drop_l3_bcast;
> +	uint64_t rx_drop_l3_mcast;
> +
> +	uint64_t tx_bytes_ok;
> +	uint64_t tx_ucast_frames_ok;
> +	uint64_t tx_bcast_frames_ok;
> +	uint64_t tx_mcast_frames_ok;
> +	uint64_t tx_drops;
> +};
> +
> +struct nicvf_rss_reta_info {
> +	uint8_t hash_bits;
> +	uint16_t rss_size;
> +	uint8_t ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
> +};
> +
> +/* Common structs used in DPDK and base layer are defined in DPDK layer */
> +#include "../nicvf_struct.h"
> +
> +NICVF_STATIC_ASSERT(sizeof(struct nicvf_rbdr) <= 128);
> +NICVF_STATIC_ASSERT(sizeof(struct nicvf_txq) <= 128);
> +NICVF_STATIC_ASSERT(sizeof(struct nicvf_rxq) <= 128);
> +
> +static inline void
> +nicvf_reg_write(struct nicvf *nic, uint32_t offset, uint64_t val)
> +{
> +	nicvf_addr_write(nic->reg_base + offset, val);
> +}
> +
> +static inline uint64_t
> +nicvf_reg_read(struct nicvf *nic, uint32_t offset)
> +{
> +	return nicvf_addr_read(nic->reg_base + offset);
> +}
> +
> +static inline uintptr_t
> +nicvf_qset_base(struct nicvf *nic, uint32_t qidx)
> +{
> +	return nic->reg_base + (qidx << NIC_Q_NUM_SHIFT);
> +}
> +
> +static inline void
> +nicvf_queue_reg_write(struct nicvf *nic, uint32_t offset, uint32_t qidx,
> +		      uint64_t val)
> +{
> +	nicvf_addr_write(nicvf_qset_base(nic, qidx) + offset, val);
> +}
> +
> +static inline uint64_t
> +nicvf_queue_reg_read(struct nicvf *nic, uint32_t offset, uint32_t qidx)
> +{
> +	return	nicvf_addr_read(nicvf_qset_base(nic, qidx) + offset);
> +}
> +
> +static inline void
> +nicvf_disable_all_interrupts(struct nicvf *nic)
> +{
> +	nicvf_reg_write(nic, NIC_VF_ENA_W1C, NICVF_INTR_ALL_MASK);
> +	nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_ALL_MASK);
> +}
> +
> +static inline uint32_t
> +nicvf_hw_version(struct nicvf *nic)
> +{
> +	return nic->subsystem_device_id;
> +}
> +
> +static inline uint64_t
> +nicvf_hw_cap(struct nicvf *nic)
> +{
> +	return nic->hwcap;
> +}
> +
> +int nicvf_base_init(struct nicvf *nic);
> +
> +int nicvf_reg_get_count(void);
> +int nicvf_reg_poll_interrupts(struct nicvf *nic);
> +int nicvf_reg_dump(struct nicvf *nic, uint64_t *data);
> +
> +int nicvf_qset_config(struct nicvf *nic);
> +int nicvf_qset_reclaim(struct nicvf *nic);
> +
> +int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx);
> +int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx);
> +int nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
> +			      rbdr_pool_get_handler handler, void *opaque,
> +			      uint32_t max_buffs);
> +int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx);
> +
> +int nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx,
> +			 struct nicvf_rxq *rxq);
> +int nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx);
> +
> +int nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx,
> +			 struct nicvf_rxq *rxq);
> +int nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx);
> +
> +int nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx,
> +			 struct nicvf_txq *txq);
> +int nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx);
> +
> +uint32_t nicvf_qsize_rbdr_roundup(uint32_t val);
> +uint32_t nicvf_qsize_cq_roundup(uint32_t val);
> +uint32_t nicvf_qsize_sq_roundup(uint32_t val);
> +
> +void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
> +
> +int nicvf_rss_config(struct nicvf *nic, uint32_t  qcnt, uint64_t cfg);
> +int nicvf_rss_term(struct nicvf *nic);
> +
> +int nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count);
> +int nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count);
> +
> +void nicvf_rss_set_key(struct nicvf *nic, uint8_t *key);
> +void nicvf_rss_get_key(struct nicvf *nic, uint8_t *key);
> +
> +void nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val);
> +uint64_t nicvf_rss_get_cfg(struct nicvf *nic);
> +
> +int nicvf_loopback_config(struct nicvf *nic, bool enable);
> +
> +void nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats);
> +void nicvf_hw_get_rx_qstats(struct nicvf *nic,
> +			    struct nicvf_hw_rx_qstats *qstats, uint16_t qidx);
> +void nicvf_hw_get_tx_qstats(struct nicvf *nic,
> +			    struct nicvf_hw_tx_qstats *qstats, uint16_t qidx);
> +
> +#endif /* _THUNDERX_NICVF_HW_H */
> diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
> new file mode 100644
> index 0000000..ef9354b
> --- /dev/null
> +++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
> @@ -0,0 +1,1216 @@
> +/*
> + *   BSD LICENSE
> + *
> + *   Copyright (C) Cavium networks Ltd. 2016.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Cavium networks nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#ifndef _THUNDERX_NICVF_HW_DEFS_H
> +#define _THUNDERX_NICVF_HW_DEFS_H
> +
> +#include <stdint.h>
> +#include <stdbool.h>
> +
> +/* Virtual function register offsets */
> +
> +#define NIC_VF_CFG                      (0x000020)
> +#define NIC_VF_PF_MAILBOX_0_1           (0x000130)
> +#define NIC_VF_INT                      (0x000200)
> +#define NIC_VF_INT_W1S                  (0x000220)
> +#define NIC_VF_ENA_W1C                  (0x000240)
> +#define NIC_VF_ENA_W1S                  (0x000260)
> +
> +#define NIC_VNIC_RSS_CFG                (0x0020E0)
> +#define NIC_VNIC_RSS_KEY_0_4            (0x002200)
> +#define NIC_VNIC_TX_STAT_0_4            (0x004000)
> +#define NIC_VNIC_RX_STAT_0_13           (0x004100)
> +#define NIC_VNIC_RQ_GEN_CFG             (0x010010)
> +
> +#define NIC_QSET_CQ_0_7_CFG             (0x010400)
> +#define NIC_QSET_CQ_0_7_CFG2            (0x010408)
> +#define NIC_QSET_CQ_0_7_THRESH          (0x010410)
> +#define NIC_QSET_CQ_0_7_BASE            (0x010420)
> +#define NIC_QSET_CQ_0_7_HEAD            (0x010428)
> +#define NIC_QSET_CQ_0_7_TAIL            (0x010430)
> +#define NIC_QSET_CQ_0_7_DOOR            (0x010438)
> +#define NIC_QSET_CQ_0_7_STATUS          (0x010440)
> +#define NIC_QSET_CQ_0_7_STATUS2         (0x010448)
> +#define NIC_QSET_CQ_0_7_DEBUG           (0x010450)
> +
> +#define NIC_QSET_RQ_0_7_CFG             (0x010600)
> +#define NIC_QSET_RQ_0_7_STATUS0         (0x010700)
> +#define NIC_QSET_RQ_0_7_STATUS1         (0x010708)
> +
> +#define NIC_QSET_SQ_0_7_CFG             (0x010800)
> +#define NIC_QSET_SQ_0_7_THRESH          (0x010810)
> +#define NIC_QSET_SQ_0_7_BASE            (0x010820)
> +#define NIC_QSET_SQ_0_7_HEAD            (0x010828)
> +#define NIC_QSET_SQ_0_7_TAIL            (0x010830)
> +#define NIC_QSET_SQ_0_7_DOOR            (0x010838)
> +#define NIC_QSET_SQ_0_7_STATUS          (0x010840)
> +#define NIC_QSET_SQ_0_7_DEBUG           (0x010848)
> +#define NIC_QSET_SQ_0_7_STATUS0         (0x010900)
> +#define NIC_QSET_SQ_0_7_STATUS1         (0x010908)
> +
> +#define NIC_QSET_RBDR_0_1_CFG           (0x010C00)
> +#define NIC_QSET_RBDR_0_1_THRESH        (0x010C10)
> +#define NIC_QSET_RBDR_0_1_BASE          (0x010C20)
> +#define NIC_QSET_RBDR_0_1_HEAD          (0x010C28)
> +#define NIC_QSET_RBDR_0_1_TAIL          (0x010C30)
> +#define NIC_QSET_RBDR_0_1_DOOR          (0x010C38)
> +#define NIC_QSET_RBDR_0_1_STATUS0       (0x010C40)
> +#define NIC_QSET_RBDR_0_1_STATUS1       (0x010C48)
> +#define NIC_QSET_RBDR_0_1_PRFCH_STATUS  (0x010C50)
> +
> +/* vNIC HW Constants */
> +
> +#define NIC_Q_NUM_SHIFT                 18
> +
> +#define MAX_QUEUE_SET                   128
> +#define MAX_RCV_QUEUES_PER_QS           8
> +#define MAX_RCV_BUF_DESC_RINGS_PER_QS   2
> +#define MAX_SND_QUEUES_PER_QS           8
> +#define MAX_CMP_QUEUES_PER_QS           8
> +
> +#define NICVF_INTR_CQ_SHIFT             0
> +#define NICVF_INTR_SQ_SHIFT             8
> +#define NICVF_INTR_RBDR_SHIFT           16
> +#define NICVF_INTR_PKT_DROP_SHIFT       20
> +#define NICVF_INTR_TCP_TIMER_SHIFT      21
> +#define NICVF_INTR_MBOX_SHIFT           22
> +#define NICVF_INTR_QS_ERR_SHIFT         23
> +
> +#define NICVF_INTR_CQ_MASK              (0xFF << NICVF_INTR_CQ_SHIFT)
> +#define NICVF_INTR_SQ_MASK              (0xFF << NICVF_INTR_SQ_SHIFT)
> +#define NICVF_INTR_RBDR_MASK            (0x03 << NICVF_INTR_RBDR_SHIFT)
> +#define NICVF_INTR_PKT_DROP_MASK        (1 << NICVF_INTR_PKT_DROP_SHIFT)
> +#define NICVF_INTR_TCP_TIMER_MASK       (1 << NICVF_INTR_TCP_TIMER_SHIFT)
> +#define NICVF_INTR_MBOX_MASK            (1 << NICVF_INTR_MBOX_SHIFT)
> +#define NICVF_INTR_QS_ERR_MASK          (1 << NICVF_INTR_QS_ERR_SHIFT)
> +#define NICVF_INTR_ALL_MASK             (0x7FFFFF)
> +
> +#define NICVF_CQ_WR_FULL                (1ULL << 26)
> +#define NICVF_CQ_WR_DISABLE             (1ULL << 25)
> +#define NICVF_CQ_WR_FAULT               (1ULL << 24)
> +#define NICVF_CQ_ERR_MASK               (NICVF_CQ_WR_FULL |\
> +					 NICVF_CQ_WR_DISABLE |\
> +					 NICVF_CQ_WR_FAULT)
> +#define NICVF_CQ_CQE_COUNT_MASK         (0xFFFF)
> +
> +#define NICVF_SQ_ERR_STOPPED            (1ULL << 21)
> +#define NICVF_SQ_ERR_SEND               (1ULL << 20)
> +#define NICVF_SQ_ERR_DPE                (1ULL << 19)
> +#define NICVF_SQ_ERR_MASK               (NICVF_SQ_ERR_STOPPED |\
> +					 NICVF_SQ_ERR_SEND |\
> +					 NICVF_SQ_ERR_DPE)
> +#define NICVF_SQ_STATUS_STOPPED_BIT     (21)
> +
> +#define NICVF_RBDR_FIFO_STATE_SHIFT     (62)
> +#define NICVF_RBDR_FIFO_STATE_MASK      (3ULL << NICVF_RBDR_FIFO_STATE_SHIFT)
> +#define NICVF_RBDR_COUNT_MASK           (0x7FFFF)
> +
> +/* Queue reset */
> +#define NICVF_CQ_RESET                  (1ULL << 41)
> +#define NICVF_SQ_RESET                  (1ULL << 17)
> +#define NICVF_RBDR_RESET                (1ULL << 43)
> +
> +/* RSS constants */
> +#define NIC_MAX_RSS_HASH_BITS           (8)
> +#define NIC_MAX_RSS_IDR_TBL_SIZE        (1 << NIC_MAX_RSS_HASH_BITS)
> +#define RSS_HASH_KEY_SIZE               (5) /* 320 bit key */
> +#define RSS_HASH_KEY_BYTE_SIZE          (40) /* 320 bit key */
> +
> +#define RSS_L2_EXTENDED_HASH_ENA        (1 << 0)
> +#define RSS_IP_ENA                      (1 << 1)
> +#define RSS_TCP_ENA                     (1 << 2)
> +#define RSS_TCP_SYN_ENA                 (1 << 3)
> +#define RSS_UDP_ENA                     (1 << 4)
> +#define RSS_L4_EXTENDED_ENA             (1 << 5)
> +#define RSS_L3_BI_DIRECTION_ENA         (1 << 7)
> +#define RSS_L4_BI_DIRECTION_ENA         (1 << 8)
> +#define RSS_TUN_VXLAN_ENA               (1 << 9)
> +#define RSS_TUN_GENEVE_ENA              (1 << 10)
> +#define RSS_TUN_NVGRE_ENA               (1 << 11)
> +
> +#define RBDR_QUEUE_SZ_8K                (8 * 1024)
> +#define RBDR_QUEUE_SZ_16K               (16 * 1024)
> +#define RBDR_QUEUE_SZ_32K               (32 * 1024)
> +#define RBDR_QUEUE_SZ_64K               (64 * 1024)
> +#define RBDR_QUEUE_SZ_128K              (128 * 1024)
> +#define RBDR_QUEUE_SZ_256K              (256 * 1024)
> +#define RBDR_QUEUE_SZ_512K              (512 * 1024)
> +
> +#define RBDR_SIZE_SHIFT                 (13) /* 8k */
> +
> +#define SND_QUEUE_SZ_1K                 (1 * 1024)
> +#define SND_QUEUE_SZ_2K                 (2 * 1024)
> +#define SND_QUEUE_SZ_4K                 (4 * 1024)
> +#define SND_QUEUE_SZ_8K                 (8 * 1024)
> +#define SND_QUEUE_SZ_16K                (16 * 1024)
> +#define SND_QUEUE_SZ_32K                (32 * 1024)
> +#define SND_QUEUE_SZ_64K                (64 * 1024)
> +
> +#define SND_QSIZE_SHIFT                 (10) /* 1k */
> +
> +#define CMP_QUEUE_SZ_1K                 (1 * 1024)
> +#define CMP_QUEUE_SZ_2K                 (2 * 1024)
> +#define CMP_QUEUE_SZ_4K                 (4 * 1024)
> +#define CMP_QUEUE_SZ_8K                 (8 * 1024)
> +#define CMP_QUEUE_SZ_16K                (16 * 1024)
> +#define CMP_QUEUE_SZ_32K                (32 * 1024)
> +#define CMP_QUEUE_SZ_64K                (64 * 1024)
> +
> +#define CMP_QSIZE_SHIFT                 (10) /* 1k */
> +
> +/* Min/Max packet size */
> +#define NIC_HW_MIN_FRS			64
> +#define NIC_HW_MAX_FRS			9200 /* 9216 max packet including FCS */
> +#define NIC_HW_MAX_SEGS			12
> +
> +/* Descriptor alignments */
> +#define NICVF_RBDR_BASE_ALIGN_BYTES	128 /* 7 bits */
> +#define NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
> +#define NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */
> +
> +/* vNIC HW Enumerations */
> +
> +enum nic_send_ld_type_e {
> +	NIC_SEND_LD_TYPE_E_LDD = 0x0,
> +	NIC_SEND_LD_TYPE_E_LDT = 0x1,
> +	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
> +	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
unnecessary assignments

> +};
> +
> +enum ether_type_algorithm {
> +	ETYPE_ALG_NONE = 0x0,
> +	ETYPE_ALG_SKIP = 0x1,
> +	ETYPE_ALG_ENDPARSE = 0x2,
> +	ETYPE_ALG_VLAN = 0x3,
> +	ETYPE_ALG_VLAN_STRIP = 0x4,
unnecessary assignment
> +};
> +
> +enum layer3_type {
> +	L3TYPE_NONE = 0x0,
> +	L3TYPE_GRH = 0x1,
unnecessary assignment
> +	L3TYPE_IPV4 = 0x4,
> +	L3TYPE_IPV4_OPTIONS = 0x5,
> +	L3TYPE_IPV6 = 0x6,
> +	L3TYPE_IPV6_OPTIONS = 0x7,
> +	L3TYPE_ET_STOP = 0xD,
> +	L3TYPE_OTHER = 0xE,
> +};
> +
> +#define NICVF_L3TYPE_OPTIONS_MASK	((uint8_t)1)
> +#define NICVF_L3TYPE_IPVX_MASK		((uint8_t)0x06)
> +
> +enum layer4_type {
> +	L4TYPE_NONE = 0x0,
> +	L4TYPE_IPSEC_ESP = 0x1,
> +	L4TYPE_IPFRAG = 0x2,
> +	L4TYPE_IPCOMP = 0x3,
> +	L4TYPE_TCP = 0x4,
> +	L4TYPE_UDP = 0x5,
> +	L4TYPE_SCTP = 0x6,
> +	L4TYPE_GRE = 0x7,
> +	L4TYPE_ROCE_BTH = 0x8,
unnecessary assignment
> +	L4TYPE_OTHER = 0xE,
> +};
> +
> +/* CPI and RSSI configuration */
> +enum cpi_algorithm_type {
> +	CPI_ALG_NONE = 0x0,
> +	CPI_ALG_VLAN = 0x1,
> +	CPI_ALG_VLAN16 = 0x2,
> +	CPI_ALG_DIFF = 0x3,
Unnecessary assignment; the same applies to more enums below.
> +};
> +
> +enum rss_algorithm_type {
> +	RSS_ALG_NONE = 0x00,
> +	RSS_ALG_PORT = 0x01,
> +	RSS_ALG_IP = 0x02,
> +	RSS_ALG_TCP_IP = 0x03,
> +	RSS_ALG_UDP_IP = 0x04,
> +	RSS_ALG_SCTP_IP = 0x05,
> +	RSS_ALG_GRE_IP = 0x06,
> +	RSS_ALG_ROCE = 0x07,
> +};
> +
> +enum rss_hash_cfg {
> +	RSS_HASH_L2ETC = 0x00,
> +	RSS_HASH_IP = 0x01,
> +	RSS_HASH_TCP = 0x02,
> +	RSS_HASH_TCP_SYN_DIS = 0x03,
> +	RSS_HASH_UDP = 0x04,
> +	RSS_HASH_L4ETC = 0x05,
> +	RSS_HASH_ROCE = 0x06,
> +	RSS_L3_BIDI = 0x07,
> +	RSS_L4_BIDI = 0x08,
> +};
> +
> +/* Completion queue entry types */
> +enum cqe_type {
> +	CQE_TYPE_INVALID = 0x0,
> +	CQE_TYPE_RX = 0x2,
> +	CQE_TYPE_RX_SPLIT = 0x3,
> +	CQE_TYPE_RX_TCP = 0x4,
> +	CQE_TYPE_SEND = 0x8,
> +	CQE_TYPE_SEND_PTP = 0x9,
> +};
> +
> +enum cqe_rx_tcp_status {
> +	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
> +	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
> +};
> +
> +enum cqe_send_status {
> +	CQE_SEND_STATUS_GOOD = 0x00,
> +	CQE_SEND_STATUS_DESC_FAULT = 0x01,
> +	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
> +	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
> +	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
> +	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
> +	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
> +	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
> +	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
> +	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
> +	CQE_SEND_STATUS_DATA_FAULT = 0x86,
> +	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
> +	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
> +	CQE_SEND_STATUS_MEM_FAULT = 0x89,
> +	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
> +	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
> +};
> +
> +enum cqe_rx_tcp_end_reason {
> +	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
> +	CQE_RX_TCP_END_INVALID_FLAG = 1,
> +	CQE_RX_TCP_END_TIMEOUT = 2,
> +	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
> +	CQE_RX_TCP_END_PKT_ERR = 4,
> +	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
> +};
> +
> +/* Packet protocol level error enumeration */
> +enum cqe_rx_err_level {
> +	CQE_RX_ERRLVL_RE = 0x0,
> +	CQE_RX_ERRLVL_L2 = 0x1,
> +	CQE_RX_ERRLVL_L3 = 0x2,
> +	CQE_RX_ERRLVL_L4 = 0x3,
> +};
> +
> +/* Packet protocol level error type enumeration */
> +enum cqe_rx_err_opcode {
> +	CQE_RX_ERR_RE_NONE = 0x0,
> +	CQE_RX_ERR_RE_PARTIAL = 0x1,
> +	CQE_RX_ERR_RE_JABBER = 0x2,
> +	CQE_RX_ERR_RE_FCS = 0x7,
> +	CQE_RX_ERR_RE_TERMINATE = 0x9,
> +	CQE_RX_ERR_RE_RX_CTL = 0xb,
> +	CQE_RX_ERR_PREL2_ERR = 0x1f,
> +	CQE_RX_ERR_L2_FRAGMENT = 0x20,
> +	CQE_RX_ERR_L2_OVERRUN = 0x21,
> +	CQE_RX_ERR_L2_PFCS = 0x22,
> +	CQE_RX_ERR_L2_PUNY = 0x23,
> +	CQE_RX_ERR_L2_MAL = 0x24,
> +	CQE_RX_ERR_L2_OVERSIZE = 0x25,
> +	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
> +	CQE_RX_ERR_L2_LENMISM = 0x27,
> +	CQE_RX_ERR_L2_PCLP = 0x28,
> +	CQE_RX_ERR_IP_NOT = 0x41,
> +	CQE_RX_ERR_IP_CHK = 0x42,
> +	CQE_RX_ERR_IP_MAL = 0x43,
> +	CQE_RX_ERR_IP_MALD = 0x44,
> +	CQE_RX_ERR_IP_HOP = 0x45,
> +	CQE_RX_ERR_L3_ICRC = 0x46,
> +	CQE_RX_ERR_L3_PCLP = 0x47,
> +	CQE_RX_ERR_L4_MAL = 0x61,
> +	CQE_RX_ERR_L4_CHK = 0x62,
> +	CQE_RX_ERR_UDP_LEN = 0x63,
> +	CQE_RX_ERR_L4_PORT = 0x64,
> +	CQE_RX_ERR_TCP_FLAG = 0x65,
> +	CQE_RX_ERR_TCP_OFFSET = 0x66,
> +	CQE_RX_ERR_L4_PCLP = 0x67,
> +	CQE_RX_ERR_RBDR_TRUNC = 0x70,
> +};
> +
> +enum send_l4_csum_type {
> +	SEND_L4_CSUM_DISABLE = 0x00,
> +	SEND_L4_CSUM_UDP = 0x01,
> +	SEND_L4_CSUM_TCP = 0x02,
> +};
> +
> +enum send_crc_alg {
> +	SEND_CRCALG_CRC32 = 0x00,
> +	SEND_CRCALG_CRC32C = 0x01,
> +	SEND_CRCALG_ICRC = 0x02,
> +};
> +
> +enum send_load_type {
> +	SEND_LD_TYPE_LDD = 0x00,
> +	SEND_LD_TYPE_LDT = 0x01,
> +	SEND_LD_TYPE_LDWB = 0x02,
> +};
> +
> +enum send_mem_alg_type {
> +	SEND_MEMALG_SET = 0x00,
> +	SEND_MEMALG_ADD = 0x08,
> +	SEND_MEMALG_SUB = 0x09,
> +	SEND_MEMALG_ADDLEN = 0x0A,
> +	SEND_MEMALG_SUBLEN = 0x0B,
> +};
> +
> +enum send_mem_dsz_type {
> +	SEND_MEMDSZ_B64 = 0x00,
> +	SEND_MEMDSZ_B32 = 0x01,
> +	SEND_MEMDSZ_B8 = 0x03,
> +};
> +
> +enum sq_subdesc_type {
> +	SQ_DESC_TYPE_INVALID = 0x00,
> +	SQ_DESC_TYPE_HEADER = 0x01,
> +	SQ_DESC_TYPE_CRC = 0x02,
> +	SQ_DESC_TYPE_IMMEDIATE = 0x03,
> +	SQ_DESC_TYPE_GATHER = 0x04,
> +	SQ_DESC_TYPE_MEMORY = 0x05,
> +};
> +
> +enum l3_type_t {
> +	L3_NONE		= 0x00,
> +	L3_IPV4		= 0x04,
> +	L3_IPV4_OPT	= 0x05,
> +	L3_IPV6		= 0x06,
> +	L3_IPV6_OPT	= 0x07,
> +	L3_ET_STOP	= 0x0D,
> +	L3_OTHER	= 0x0E
> +};
> +
> +enum l4_type_t {
> +	L4_NONE		= 0x00,
> +	L4_IPSEC_ESP	= 0x01,
> +	L4_IPFRAG	= 0x02,
> +	L4_IPCOMP	= 0x03,
> +	L4_TCP		= 0x04,
> +	L4_UDP_PASS1	= 0x05,
> +	L4_GRE		= 0x07,
> +	L4_UDP_PASS2	= 0x08,
> +	L4_UDP_GENEVE	= 0x09,
> +	L4_UDP_VXLAN	= 0x0A,
> +	L4_NVGRE	= 0x0C,
> +	L4_OTHER	= 0x0E
> +};
> +
> +enum vlan_strip {
> +	NO_STRIP = 0x0,
> +	STRIP_FIRST_VLAN = 0x1,
> +	STRIP_SECOND_VLAN = 0x2,
> +	STRIP_RESERV = 0x3
> +};
> +
> +enum rbdr_state {
> +	RBDR_FIFO_STATE_INACTIVE = 0,
> +	RBDR_FIFO_STATE_ACTIVE   = 1,
> +	RBDR_FIFO_STATE_RESET    = 2,
> +	RBDR_FIFO_STATE_FAIL     = 3
> +};
> +
> +enum rq_cache_allocation {
> +	RQ_CACHE_ALLOC_OFF      = 0,
> +	RQ_CACHE_ALLOC_ALL      = 1,
> +	RQ_CACHE_ALLOC_FIRST    = 2,
> +	RQ_CACHE_ALLOC_TWO      = 3,
> +};
> +
> +enum cq_rx_errlvl_e {
> +	CQ_ERRLVL_MAC,
> +	CQ_ERRLVL_L2,
> +	CQ_ERRLVL_L3,
> +	CQ_ERRLVL_L4,
> +};
> +
> +enum cq_rx_errop_e {
> +	CQ_RX_ERROP_RE_NONE = 0x0,
> +	CQ_RX_ERROP_RE_PARTIAL = 0x1,
> +	CQ_RX_ERROP_RE_JABBER = 0x2,
> +	CQ_RX_ERROP_RE_FCS = 0x7,
> +	CQ_RX_ERROP_RE_TERMINATE = 0x9,
> +	CQ_RX_ERROP_RE_RX_CTL = 0xb,
> +	CQ_RX_ERROP_PREL2_ERR = 0x1f,
> +	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
> +	CQ_RX_ERROP_L2_OVERRUN = 0x21,
> +	CQ_RX_ERROP_L2_PFCS = 0x22,
> +	CQ_RX_ERROP_L2_PUNY = 0x23,
> +	CQ_RX_ERROP_L2_MAL = 0x24,
> +	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
> +	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
> +	CQ_RX_ERROP_L2_LENMISM = 0x27,
> +	CQ_RX_ERROP_L2_PCLP = 0x28,
> +	CQ_RX_ERROP_IP_NOT = 0x41,
> +	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
> +	CQ_RX_ERROP_IP_MAL = 0x43,
> +	CQ_RX_ERROP_IP_MALD = 0x44,
> +	CQ_RX_ERROP_IP_HOP = 0x45,
> +	CQ_RX_ERROP_L3_ICRC = 0x46,
> +	CQ_RX_ERROP_L3_PCLP = 0x47,
> +	CQ_RX_ERROP_L4_MAL = 0x61,
> +	CQ_RX_ERROP_L4_CHK = 0x62,
> +	CQ_RX_ERROP_UDP_LEN = 0x63,
> +	CQ_RX_ERROP_L4_PORT = 0x64,
> +	CQ_RX_ERROP_TCP_FLAG = 0x65,
> +	CQ_RX_ERROP_TCP_OFFSET = 0x66,
> +	CQ_RX_ERROP_L4_PCLP = 0x67,
> +	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
> +};
> +
> +enum cq_tx_errop_e {
> +	CQ_TX_ERROP_GOOD = 0x0,
> +	CQ_TX_ERROP_DESC_FAULT = 0x10,
> +	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
> +	CQ_TX_ERROP_SUBDC_ERR = 0x12,
> +	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
> +	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
> +	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
> +	CQ_TX_ERROP_LOCK_VIOL = 0x83,
> +	CQ_TX_ERROP_DATA_FAULT = 0x84,
> +	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
> +	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
> +	CQ_TX_ERROP_MEM_FAULT = 0x87,
> +	CQ_TX_ERROP_CK_OVERLAP = 0x88,
> +	CQ_TX_ERROP_CK_OFLOW = 0x89,
> +	CQ_TX_ERROP_ENUM_LAST = 0x8a,
> +};
> +
> +enum rq_sq_stats_reg_offset {
> +	RQ_SQ_STATS_OCTS = 0x0,
> +	RQ_SQ_STATS_PKTS = 0x1,
> +};
> +
> +enum nic_stat_vnic_rx_e {
> +	RX_OCTS = 0,
> +	RX_UCAST,
> +	RX_BCAST,
> +	RX_MCAST,
> +	RX_RED,
> +	RX_RED_OCTS,
> +	RX_ORUN,
> +	RX_ORUN_OCTS,
> +	RX_FCS,
> +	RX_L2ERR,
> +	RX_DRP_BCAST,
> +	RX_DRP_MCAST,
> +	RX_DRP_L3BCAST,
> +	RX_DRP_L3MCAST,
> +};
> +
> +enum nic_stat_vnic_tx_e {
> +	TX_OCTS = 0,
> +	TX_UCAST,
> +	TX_BCAST,
> +	TX_MCAST,
> +	TX_DROP,
> +};
> +
> +#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
> +
> +typedef uint64_t nicvf_phys_addr_t;
> +
> +#ifndef __BYTE_ORDER__
> +#error __BYTE_ORDER__ not defined
> +#endif
> +
> +/* vNIC HW Structures */
> +
> +#define NICVF_CQE_RBPTR_WORD         6
> +#define NICVF_CQE_RX2_RBPTR_WORD     7
> +
> +typedef union {
> +	uint64_t u64;
> +	struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +		uint64_t cqe_type:4;
> +		uint64_t stdn_fault:1;
> +		uint64_t rsvd0:1;
> +		uint64_t rq_qs:7;
> +		uint64_t rq_idx:3;
> +		uint64_t rsvd1:12;
> +		uint64_t rss_alg:4;
> +		uint64_t rsvd2:4;
> +		uint64_t rb_cnt:4;
> +		uint64_t vlan_found:1;
> +		uint64_t vlan_stripped:1;
> +		uint64_t vlan2_found:1;
> +		uint64_t vlan2_stripped:1;
> +		uint64_t l4_type:4;
> +		uint64_t l3_type:4;
> +		uint64_t l2_present:1;
> +		uint64_t err_level:3;
> +		uint64_t err_opcode:8;
> +#else
> +		uint64_t err_opcode:8;
> +		uint64_t err_level:3;
> +		uint64_t l2_present:1;
> +		uint64_t l3_type:4;
> +		uint64_t l4_type:4;
> +		uint64_t vlan2_stripped:1;
> +		uint64_t vlan2_found:1;
> +		uint64_t vlan_stripped:1;
> +		uint64_t vlan_found:1;
> +		uint64_t rb_cnt:4;
> +		uint64_t rsvd2:4;
> +		uint64_t rss_alg:4;
> +		uint64_t rsvd1:12;
> +		uint64_t rq_idx:3;
> +		uint64_t rq_qs:7;
> +		uint64_t rsvd0:1;
> +		uint64_t stdn_fault:1;
> +		uint64_t cqe_type:4;
> +#endif
> +	};
> +} cqe_rx_word0_t;
> +
> +typedef union {
> +	uint64_t u64;
> +	struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +		uint64_t pkt_len:16;
> +		uint64_t l2_ptr:8;
> +		uint64_t l3_ptr:8;
> +		uint64_t l4_ptr:8;
> +		uint64_t cq_pkt_len:8;
> +		uint64_t align_pad:3;
> +		uint64_t rsvd3:1;
> +		uint64_t chan:12;
> +#else
> +		uint64_t chan:12;
> +		uint64_t rsvd3:1;
> +		uint64_t align_pad:3;
> +		uint64_t cq_pkt_len:8;
> +		uint64_t l4_ptr:8;
> +		uint64_t l3_ptr:8;
> +		uint64_t l2_ptr:8;
> +		uint64_t pkt_len:16;
> +#endif
> +	};
> +} cqe_rx_word1_t;
> +
> +typedef union {
> +	uint64_t u64;
> +	struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +		uint64_t rss_tag:32;
> +		uint64_t vlan_tci:16;
> +		uint64_t vlan_ptr:8;
> +		uint64_t vlan2_ptr:8;
> +#else
> +		uint64_t vlan2_ptr:8;
> +		uint64_t vlan_ptr:8;
> +		uint64_t vlan_tci:16;
> +		uint64_t rss_tag:32;
> +#endif
> +	};
> +} cqe_rx_word2_t;
> +
> +typedef union {
> +	uint64_t u64;
> +	struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +		uint16_t rb3_sz;
> +		uint16_t rb2_sz;
> +		uint16_t rb1_sz;
> +		uint16_t rb0_sz;
> +#else
> +		uint16_t rb0_sz;
> +		uint16_t rb1_sz;
> +		uint16_t rb2_sz;
> +		uint16_t rb3_sz;
> +#endif
> +	};
> +} cqe_rx_word3_t;
> +
> +typedef union {
> +	uint64_t u64;
> +	struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +		uint16_t rb7_sz;
> +		uint16_t rb6_sz;
> +		uint16_t rb5_sz;
> +		uint16_t rb4_sz;
> +#else
> +		uint16_t rb4_sz;
> +		uint16_t rb5_sz;
> +		uint16_t rb6_sz;
> +		uint16_t rb7_sz;
> +#endif
> +	};
> +} cqe_rx_word4_t;
> +
> +typedef union {
> +	uint64_t u64;
> +	struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +		uint16_t rb11_sz;
> +		uint16_t rb10_sz;
> +		uint16_t rb9_sz;
> +		uint16_t rb8_sz;
> +#else
> +		uint16_t rb8_sz;
> +		uint16_t rb9_sz;
> +		uint16_t rb10_sz;
> +		uint16_t rb11_sz;
> +#endif
> +	};
> +} cqe_rx_word5_t;
> +
> +typedef union {
> +	uint64_t u64;
> +	struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +		uint64_t vlan_found:1;
> +		uint64_t vlan_stripped:1;
> +		uint64_t vlan2_found:1;
> +		uint64_t vlan2_stripped:1;
> +		uint64_t rsvd2:3;
> +		uint64_t inner_l2:1;
> +		uint64_t inner_l4type:4;
> +		uint64_t inner_l3type:4;
> +		uint64_t vlan_ptr:8;
> +		uint64_t vlan2_ptr:8;
> +		uint64_t rsvd1:8;
> +		uint64_t rsvd0:8;
> +		uint64_t inner_l3ptr:8;
> +		uint64_t inner_l4ptr:8;
> +#else
> +		uint64_t inner_l4ptr:8;
> +		uint64_t inner_l3ptr:8;
> +		uint64_t rsvd0:8;
> +		uint64_t rsvd1:8;
> +		uint64_t vlan2_ptr:8;
> +		uint64_t vlan_ptr:8;
> +		uint64_t inner_l3type:4;
> +		uint64_t inner_l4type:4;
> +		uint64_t inner_l2:1;
> +		uint64_t rsvd2:3;
> +		uint64_t vlan2_stripped:1;
> +		uint64_t vlan2_found:1;
> +		uint64_t vlan_stripped:1;
> +		uint64_t vlan_found:1;
> +#endif
> +	};
> +} cqe_rx2_word6_t;
> +
> +struct cqe_rx_t {
> +	cqe_rx_word0_t word0;
> +	cqe_rx_word1_t word1;
> +	cqe_rx_word2_t word2;
> +	cqe_rx_word3_t word3;
> +	cqe_rx_word4_t word4;
> +	cqe_rx_word5_t word5;
> +	cqe_rx2_word6_t word6; /* if NIC_PF_RX_CFG[CQE_RX2_ENA] set */
> +};
> +
> +struct cqe_rx_tcp_err_t {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t   cqe_type:4; /* W0 */
> +	uint64_t   rsvd0:60;
> +
> +	uint64_t   rsvd1:4; /* W1 */
> +	uint64_t   partial_first:1;
> +	uint64_t   rsvd2:27;
> +	uint64_t   rbdr_bytes:8;
> +	uint64_t   rsvd3:24;
> +#else
> +	uint64_t   rsvd0:60;
> +	uint64_t   cqe_type:4;
> +
> +	uint64_t   rsvd3:24;
> +	uint64_t   rbdr_bytes:8;
> +	uint64_t   rsvd2:27;
> +	uint64_t   partial_first:1;
> +	uint64_t   rsvd1:4;
> +#endif
> +};
> +
> +struct cqe_rx_tcp_t {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t   cqe_type:4; /* W0 */
> +	uint64_t   rsvd0:52;
> +	uint64_t   cq_tcp_status:8;
> +
> +	uint64_t   rsvd1:32; /* W1 */
> +	uint64_t   tcp_cntx_bytes:8;
> +	uint64_t   rsvd2:8;
> +	uint64_t   tcp_err_bytes:16;
> +#else
> +	uint64_t   cq_tcp_status:8;
> +	uint64_t   rsvd0:52;
> +	uint64_t   cqe_type:4; /* W0 */
> +
> +	uint64_t   tcp_err_bytes:16;
> +	uint64_t   rsvd2:8;
> +	uint64_t   tcp_cntx_bytes:8;
> +	uint64_t   rsvd1:32; /* W1 */
> +#endif
> +};
> +
> +struct cqe_send_t {
> +#if defined(__BIG_ENDIAN_BITFIELD)
> +	uint64_t   cqe_type:4; /* W0 */
> +	uint64_t   rsvd0:4;
> +	uint64_t   sqe_ptr:16;
> +	uint64_t   rsvd1:4;
> +	uint64_t   rsvd2:10;
> +	uint64_t   sq_qs:7;
> +	uint64_t   sq_idx:3;
> +	uint64_t   rsvd3:8;
> +	uint64_t   send_status:8;
> +
> +	uint64_t   ptp_timestamp:64; /* W1 */
> +#elif defined(__LITTLE_ENDIAN_BITFIELD)
> +	uint64_t   send_status:8;
> +	uint64_t   rsvd3:8;
> +	uint64_t   sq_idx:3;
> +	uint64_t   sq_qs:7;
> +	uint64_t   rsvd2:10;
> +	uint64_t   rsvd1:4;
> +	uint64_t   sqe_ptr:16;
> +	uint64_t   rsvd0:4;
> +	uint64_t   cqe_type:4; /* W0 */
> +
> +	uint64_t   ptp_timestamp:64;
> +#endif
> +};
> +
> +struct cq_entry_type_t {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t cqe_type:4;
> +	uint64_t __pad:60;
> +#else
> +	uint64_t __pad:60;
> +	uint64_t cqe_type:4;
> +#endif
> +};
> +
> +union cq_entry_t {
> +	uint64_t u[64];
> +	struct cq_entry_type_t type;
> +	struct cqe_rx_t rx_hdr;
> +	struct cqe_rx_tcp_t rx_tcp_hdr;
> +	struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
> +	struct cqe_send_t cqe_send;
> +};
> +
> +NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
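For readers new to this layout: a completion poll loop typically reads one
of these unions and dispatches on the type field. A minimal sketch only;
next_cq_entry(), process_rx() and process_send() are made-up helpers, and
the CQE_TYPE_* values come from the cqe_type enum earlier in this header:

	union cq_entry_t *cqe = next_cq_entry(cq);	/* hypothetical helper */
	switch (cqe->type.cqe_type) {
	case CQE_TYPE_RX:			/* regular receive completion */
		process_rx(&cqe->rx_hdr);
		break;
	case CQE_TYPE_SEND:			/* transmit completion */
		process_send(&cqe->cqe_send);
		break;
	}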
> +
> +struct rbdr_entry_t {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	union {
> +		struct {
> +			uint64_t   rsvd0:15;
> +			uint64_t   buf_addr:42;
> +			uint64_t   cache_align:7;
> +		};
> +		nicvf_phys_addr_t full_addr;
> +	};
> +#else
> +	union {
> +		struct {
> +			uint64_t   cache_align:7;
> +			uint64_t   buf_addr:42;
> +			uint64_t   rsvd0:15;
> +		};
> +		nicvf_phys_addr_t full_addr;
> +	};
> +#endif
> +};
> +
> +NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
> +
> +/* TCP reassembly context */
> +struct rbe_tcp_cnxt_t {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t   tcp_pkt_cnt:12;
> +	uint64_t   rsvd1:4;
> +	uint64_t   align_hdr_bytes:4;
> +	uint64_t   align_ptr_bytes:4;
> +	uint64_t   ptr_bytes:16;
> +	uint64_t   rsvd2:24;
> +	uint64_t   cqe_type:4;
> +	uint64_t   rsvd0:54;
> +	uint64_t   tcp_end_reason:2;
> +	uint64_t   tcp_status:4;
> +#else
> +	uint64_t   tcp_status:4;
> +	uint64_t   tcp_end_reason:2;
> +	uint64_t   rsvd0:54;
> +	uint64_t   cqe_type:4;
> +	uint64_t   rsvd2:24;
> +	uint64_t   ptr_bytes:16;
> +	uint64_t   align_ptr_bytes:4;
> +	uint64_t   align_hdr_bytes:4;
> +	uint64_t   rsvd1:4;
> +	uint64_t   tcp_pkt_cnt:12;
> +#endif
> +};
> +
> +/* Always Big endian */
> +struct rx_hdr_t {
> +	uint64_t   opaque:32;
> +	uint64_t   rss_flow:8;
> +	uint64_t   skip_length:6;
> +	uint64_t   disable_rss:1;
> +	uint64_t   disable_tcp_reassembly:1;
> +	uint64_t   nodrop:1;
> +	uint64_t   dest_alg:2;
> +	uint64_t   rsvd0:2;
> +	uint64_t   dest_rq:11;
> +};
> +
> +struct sq_crc_subdesc {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t    rsvd1:32;
> +	uint64_t    crc_ival:32;
> +	uint64_t    subdesc_type:4;
> +	uint64_t    crc_alg:2;
> +	uint64_t    rsvd0:10;
> +	uint64_t    crc_insert_pos:16;
> +	uint64_t    hdr_start:16;
> +	uint64_t    crc_len:16;
> +#else
> +	uint64_t    crc_len:16;
> +	uint64_t    hdr_start:16;
> +	uint64_t    crc_insert_pos:16;
> +	uint64_t    rsvd0:10;
> +	uint64_t    crc_alg:2;
> +	uint64_t    subdesc_type:4;
> +	uint64_t    crc_ival:32;
> +	uint64_t    rsvd1:32;
> +#endif
> +};
> +
> +struct sq_gather_subdesc {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t    subdesc_type:4; /* W0 */
> +	uint64_t    ld_type:2;
> +	uint64_t    rsvd0:42;
> +	uint64_t    size:16;
> +
> +	uint64_t    rsvd1:15; /* W1 */
> +	uint64_t    addr:49;
> +#else
> +	uint64_t    size:16;
> +	uint64_t    rsvd0:42;
> +	uint64_t    ld_type:2;
> +	uint64_t    subdesc_type:4; /* W0 */
> +
> +	uint64_t    addr:49;
> +	uint64_t    rsvd1:15; /* W1 */
> +#endif
> +};
> +
> +/* SQ immediate subdescriptor */
> +struct sq_imm_subdesc {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t    subdesc_type:4; /* W0 */
> +	uint64_t    rsvd0:46;
> +	uint64_t    len:14;
> +
> +	uint64_t    data:64; /* W1 */
> +#else
> +	uint64_t    len:14;
> +	uint64_t    rsvd0:46;
> +	uint64_t    subdesc_type:4; /* W0 */
> +
> +	uint64_t    data:64; /* W1 */
> +#endif
> +};
> +
> +struct sq_mem_subdesc {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t    subdesc_type:4; /* W0 */
> +	uint64_t    mem_alg:4;
> +	uint64_t    mem_dsz:2;
> +	uint64_t    wmem:1;
> +	uint64_t    rsvd0:21;
> +	uint64_t    offset:32;
> +
> +	uint64_t    rsvd1:15; /* W1 */
> +	uint64_t    addr:49;
> +#else
> +	uint64_t    offset:32;
> +	uint64_t    rsvd0:21;
> +	uint64_t    wmem:1;
> +	uint64_t    mem_dsz:2;
> +	uint64_t    mem_alg:4;
> +	uint64_t    subdesc_type:4; /* W0 */
> +
> +	uint64_t    addr:49;
> +	uint64_t    rsvd1:15; /* W1 */
> +#endif
> +};
> +
> +struct sq_hdr_subdesc {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t    subdesc_type:4;
> +	uint64_t    tso:1;
> +	uint64_t    post_cqe:1; /* Post CQE on no error also */
> +	uint64_t    dont_send:1;
> +	uint64_t    tstmp:1;
> +	uint64_t    subdesc_cnt:8;
> +	uint64_t    csum_l4:2;
> +	uint64_t    csum_l3:1;
> +	uint64_t    csum_inner_l4:2;
> +	uint64_t    csum_inner_l3:1;
> +	uint64_t    rsvd0:2;
> +	uint64_t    l4_offset:8;
> +	uint64_t    l3_offset:8;
> +	uint64_t    rsvd1:4;
> +	uint64_t    tot_len:20; /* W0 */
> +
> +	uint64_t    rsvd2:24;
> +	uint64_t    inner_l4_offset:8;
> +	uint64_t    inner_l3_offset:8;
> +	uint64_t    tso_start:8;
> +	uint64_t    rsvd3:2;
> +	uint64_t    tso_max_paysize:14; /* W1 */
> +#else
> +	uint64_t    tot_len:20;
> +	uint64_t    rsvd1:4;
> +	uint64_t    l3_offset:8;
> +	uint64_t    l4_offset:8;
> +	uint64_t    rsvd0:2;
> +	uint64_t    csum_inner_l3:1;
> +	uint64_t    csum_inner_l4:2;
> +	uint64_t    csum_l3:1;
> +	uint64_t    csum_l4:2;
> +	uint64_t    subdesc_cnt:8;
> +	uint64_t    tstmp:1;
> +	uint64_t    dont_send:1;
> +	uint64_t    post_cqe:1; /* Post CQE on no error also */
> +	uint64_t    tso:1;
> +	uint64_t    subdesc_type:4; /* W0 */
> +
> +	uint64_t    tso_max_paysize:14;
> +	uint64_t    rsvd3:2;
> +	uint64_t    tso_start:8;
> +	uint64_t    inner_l3_offset:8;
> +	uint64_t    inner_l4_offset:8;
> +	uint64_t    rsvd2:24; /* W1 */
> +#endif
> +};
> +
> +/* Each sq entry is 128 bits wide */
> +union sq_entry_t {
> +	uint64_t buff[2];
> +	struct sq_hdr_subdesc hdr;
> +	struct sq_imm_subdesc imm;
> +	struct sq_gather_subdesc gather;
> +	struct sq_crc_subdesc crc;
> +	struct sq_mem_subdesc mem;
> +};
> +
> +NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
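To make the send-queue format concrete: a basic transmit command is one
header subdescriptor followed by one gather subdescriptor. A sketch under
assumed names (pkt_len and buf_iova are hypothetical; a real driver also
fills the checksum and offset fields):

	union sq_entry_t hdr = { .buff = {0, 0} };
	hdr.hdr.subdesc_type = SQ_DESC_TYPE_HEADER;
	hdr.hdr.subdesc_cnt = 1;		/* one subdescriptor follows */
	hdr.hdr.tot_len = pkt_len;

	union sq_entry_t gather = { .buff = {0, 0} };
	gather.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
	gather.gather.ld_type = SEND_LD_TYPE_LDD;
	gather.gather.size = pkt_len;
	gather.gather.addr = buf_iova;		/* DMA address of the data */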
> +
> +/* Queue config register formats */
> +struct rq_cfg { union { struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t reserved_2_63:62;
> +	uint64_t ena:1;
> +	uint64_t reserved_0:1;
> +#else
> +	uint64_t reserved_0:1;
> +	uint64_t ena:1;
> +	uint64_t reserved_2_63:62;
> +#endif
> +	};
> +	uint64_t value;
> +}; };
> +
> +struct cq_cfg { union { struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t reserved_43_63:21;
> +	uint64_t ena:1;
> +	uint64_t reset:1;
> +	uint64_t caching:1;
> +	uint64_t reserved_35_39:5;
> +	uint64_t qsize:3;
> +	uint64_t reserved_25_31:7;
> +	uint64_t avg_con:9;
> +	uint64_t reserved_0_15:16;
> +#else
> +	uint64_t reserved_0_15:16;
> +	uint64_t avg_con:9;
> +	uint64_t reserved_25_31:7;
> +	uint64_t qsize:3;
> +	uint64_t reserved_35_39:5;
> +	uint64_t caching:1;
> +	uint64_t reset:1;
> +	uint64_t ena:1;
> +	uint64_t reserved_43_63:21;
> +#endif
> +	};
> +	uint64_t value;
> +}; };
> +
> +struct sq_cfg { union { struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t reserved_20_63:44;
> +	uint64_t ena:1;
> +	uint64_t reserved_18_18:1;
> +	uint64_t reset:1;
> +	uint64_t ldwb:1;
> +	uint64_t reserved_11_15:5;
> +	uint64_t qsize:3;
> +	uint64_t reserved_3_7:5;
> +	uint64_t tstmp_bgx_intf:3;
> +#else
> +	uint64_t tstmp_bgx_intf:3;
> +	uint64_t reserved_3_7:5;
> +	uint64_t qsize:3;
> +	uint64_t reserved_11_15:5;
> +	uint64_t ldwb:1;
> +	uint64_t reset:1;
> +	uint64_t reserved_18_18:1;
> +	uint64_t ena:1;
> +	uint64_t reserved_20_63:44;
> +#endif
> +	};
> +	uint64_t value;
> +}; };
> +
> +struct rbdr_cfg { union { struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t reserved_45_63:19;
> +	uint64_t ena:1;
> +	uint64_t reset:1;
> +	uint64_t ldwb:1;
> +	uint64_t reserved_36_41:6;
> +	uint64_t qsize:4;
> +	uint64_t reserved_25_31:7;
> +	uint64_t avg_con:9;
> +	uint64_t reserved_12_15:4;
> +	uint64_t lines:12;
> +#else
> +	uint64_t lines:12;
> +	uint64_t reserved_12_15:4;
> +	uint64_t avg_con:9;
> +	uint64_t reserved_25_31:7;
> +	uint64_t qsize:4;
> +	uint64_t reserved_36_41:6;
> +	uint64_t ldwb:1;
> +	uint64_t reset:1;
> +	uint64_t ena:1;
> +	uint64_t reserved_45_63:19;
> +#endif
> +	};
> +	uint64_t value;
> +}; };
> +
> +struct pf_qs_cfg { union { struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t reserved_32_63:32;
> +	uint64_t ena:1;
> +	uint64_t reserved_27_30:4;
> +	uint64_t sq_ins_ena:1;
> +	uint64_t sq_ins_pos:6;
> +	uint64_t lock_ena:1;
> +	uint64_t lock_viol_cqe_ena:1;
> +	uint64_t send_tstmp_ena:1;
> +	uint64_t be:1;
> +	uint64_t reserved_7_15:9;
> +	uint64_t vnic:7;
> +#else
> +	uint64_t vnic:7;
> +	uint64_t reserved_7_15:9;
> +	uint64_t be:1;
> +	uint64_t send_tstmp_ena:1;
> +	uint64_t lock_viol_cqe_ena:1;
> +	uint64_t lock_ena:1;
> +	uint64_t sq_ins_pos:6;
> +	uint64_t sq_ins_ena:1;
> +	uint64_t reserved_27_30:4;
> +	uint64_t ena:1;
> +	uint64_t reserved_32_63:32;
> +#endif
> +	};
> +	uint64_t value;
> +}; };
> +
> +struct pf_rq_cfg { union { struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t reserverd1:1;
> +	uint64_t reserverd0:34;
> +	uint64_t strip_pre_l2:1;
> +	uint64_t caching:2;
> +	uint64_t cq_qs:7;
> +	uint64_t cq_idx:3;
> +	uint64_t rbdr_cont_qs:7;
> +	uint64_t rbdr_cont_idx:1;
> +	uint64_t rbdr_strt_qs:7;
> +	uint64_t rbdr_strt_idx:1;
> +#else
> +	uint64_t rbdr_strt_idx:1;
> +	uint64_t rbdr_strt_qs:7;
> +	uint64_t rbdr_cont_idx:1;
> +	uint64_t rbdr_cont_qs:7;
> +	uint64_t cq_idx:3;
> +	uint64_t cq_qs:7;
> +	uint64_t caching:2;
> +	uint64_t strip_pre_l2:1;
> +	uint64_t reserverd0:34;
> +	uint64_t reserverd1:1;
> +#endif
> +	};
> +	uint64_t value;
> +}; };
> +
> +struct pf_rq_drop_cfg { union { struct {
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	uint64_t rbdr_red:1;
> +	uint64_t cq_red:1;
> +	uint64_t reserved3:14;
> +	uint64_t rbdr_pass:8;
> +	uint64_t rbdr_drop:8;
> +	uint64_t reserved2:8;
> +	uint64_t cq_pass:8;
> +	uint64_t cq_drop:8;
> +	uint64_t reserved1:8;
> +#else
> +	uint64_t reserved1:8;
> +	uint64_t cq_drop:8;
> +	uint64_t cq_pass:8;
> +	uint64_t reserved2:8;
> +	uint64_t rbdr_drop:8;
> +	uint64_t rbdr_pass:8;
> +	uint64_t reserved3:14;
> +	uint64_t cq_red:1;
> +	uint64_t rbdr_red:1;
> +#endif
> +	};
> +	uint64_t value;
> +}; };
> +
> +#endif /* _THUNDERX_NICVF_HW_DEFS_H */
> diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
> new file mode 100644
> index 0000000..715c7c3
> --- /dev/null
> +++ b/drivers/net/thunderx/base/nicvf_mbox.c
> @@ -0,0 +1,416 @@
> +/*
> + *   BSD LICENSE
> + *
> + *   Copyright (C) Cavium networks Ltd. 2016.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Cavium networks nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <assert.h>
> +#include <unistd.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +
> +#include "nicvf_plat.h"
> +
> +static const char *mbox_message[NIC_MBOX_MSG_MAX] =  {
> +	[NIC_MBOX_MSG_INVALID]            = "NIC_MBOX_MSG_INVALID",
> +	[NIC_MBOX_MSG_READY]              = "NIC_MBOX_MSG_READY",
> +	[NIC_MBOX_MSG_ACK]                = "NIC_MBOX_MSG_ACK",
> +	[NIC_MBOX_MSG_NACK]               = "NIC_MBOX_MSG_NACK",
> +	[NIC_MBOX_MSG_QS_CFG]             = "NIC_MBOX_MSG_QS_CFG",
> +	[NIC_MBOX_MSG_RQ_CFG]             = "NIC_MBOX_MSG_RQ_CFG",
> +	[NIC_MBOX_MSG_SQ_CFG]             = "NIC_MBOX_MSG_SQ_CFG",
> +	[NIC_MBOX_MSG_RQ_DROP_CFG]        = "NIC_MBOX_MSG_RQ_DROP_CFG",
> +	[NIC_MBOX_MSG_SET_MAC]            = "NIC_MBOX_MSG_SET_MAC",
> +	[NIC_MBOX_MSG_SET_MAX_FRS]        = "NIC_MBOX_MSG_SET_MAX_FRS",
> +	[NIC_MBOX_MSG_CPI_CFG]            = "NIC_MBOX_MSG_CPI_CFG",
> +	[NIC_MBOX_MSG_RSS_SIZE]           = "NIC_MBOX_MSG_RSS_SIZE",
> +	[NIC_MBOX_MSG_RSS_CFG]            = "NIC_MBOX_MSG_RSS_CFG",
> +	[NIC_MBOX_MSG_RSS_CFG_CONT]       = "NIC_MBOX_MSG_RSS_CFG_CONT",
> +	[NIC_MBOX_MSG_RQ_BP_CFG]          = "NIC_MBOX_MSG_RQ_BP_CFG",
> +	[NIC_MBOX_MSG_RQ_SW_SYNC]         = "NIC_MBOX_MSG_RQ_SW_SYNC",
> +	[NIC_MBOX_MSG_BGX_LINK_CHANGE]    = "NIC_MBOX_MSG_BGX_LINK_CHANGE",
> +	[NIC_MBOX_MSG_ALLOC_SQS]          = "NIC_MBOX_MSG_ALLOC_SQS",
> +	[NIC_MBOX_MSG_LOOPBACK]           = "NIC_MBOX_MSG_LOOPBACK",
> +	[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
> +	[NIC_MBOX_MSG_CFG_DONE]           = "NIC_MBOX_MSG_CFG_DONE",
> +	[NIC_MBOX_MSG_SHUTDOWN]           = "NIC_MBOX_MSG_SHUTDOWN",
> +};
> +
> +static inline const char *
> +nicvf_mbox_msg_str(int msg)
> +{
> +	assert(msg >= 0 && msg < NIC_MBOX_MSG_MAX);
> +	/* undefined messages */
> +	if (mbox_message[msg] == NULL)
> +		msg = 0;
> +	return mbox_message[msg];
> +}
> +
> +static inline void
> +nicvf_mbox_send_msg_to_pf_raw(struct nicvf *nic, struct nic_mbx *mbx)
> +{
> +	uint64_t *mbx_data;
> +	uint64_t mbx_addr;
> +	int i;
> +
> +	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
> +	mbx_data = (uint64_t *)mbx;
> +	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
> +		nicvf_reg_write(nic, mbx_addr, *mbx_data);
> +		mbx_data++;
> +		mbx_addr += sizeof(uint64_t);
> +	}
> +	nicvf_mbox_log("msg sent %s (VF%d)",
> +			nicvf_mbox_msg_str(mbx->msg.msg), nic->vf_id);
> +}
> +
> +static inline void
> +nicvf_mbox_send_async_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
> +{
> +	nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
> +	/* Messages without ack are racy! */
> +	nicvf_delay_us(1000);
hardcoded delay time, more below
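A minimal way to name the constant (sketch only; the macro name is made up,
the 1000 us value is the one from the patch):

	#define NICVF_MBOX_ASYNC_MSG_DELAY_US	1000	/* hypothetical name */

	nicvf_delay_us(NICVF_MBOX_ASYNC_MSG_DELAY_US);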

> +}
> +
> +static inline int
> +nicvf_mbox_send_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
> +{
> +	long timeout;
> +	long sleep = 10;
> +	int i, retry = 5;
> +
> +	for (i = 0; i < retry; i++) {
> +		nic->pf_acked = false;
> +		nic->pf_nacked = false;
> +		nicvf_smp_wmb();
> +
> +		nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
> +		/* Give some time to get PF response */
> +		nicvf_delay_us(1000);
> +		timeout = NIC_MBOX_MSG_TIMEOUT;
> +		while (timeout > 0) {
> +			/* Periodic poll happens from nicvf_interrupt() */
> +			nicvf_smp_rmb();
> +
> +			if (nic->pf_nacked)
> +				return -EINVAL;
> +			if (nic->pf_acked)
> +				return 0;
> +
> +			nicvf_delay_us(1000);
> +			timeout -= sleep;
> +		}
> +		nicvf_log_error("PF didn't ack to msg 0x%02x %s VF%d (%d/%d)",
> +				mbx->msg.msg, nicvf_mbox_msg_str(mbx->msg.msg),
> +				nic->vf_id, i, retry);
> +	}
> +	return -EBUSY;
> +}
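Side note on the units here: NIC_MBOX_MSG_TIMEOUT is documented as 2000 ms
(see nicvf_mbox.h below), but each loop iteration sleeps 1 ms while
decrementing the budget by 10, so each retry actually waits only about
200 ms. A sketch of a consistent variant (illustrative, not the submitted
code):

	timeout = NIC_MBOX_MSG_TIMEOUT;		/* 2000 ms */
	while (timeout > 0) {
		/* poll pf_acked/pf_nacked as above */
		nicvf_delay_us(1000);		/* 1 ms */
		timeout -= 1;			/* same unit as the budget */
	}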
> +
> +
> +int
> +nicvf_handle_mbx_intr(struct nicvf *nic)
> +{
> +	struct nic_mbx mbx;
> +	uint64_t *mbx_data = (uint64_t *)&mbx;
> +	uint64_t mbx_addr = NIC_VF_PF_MAILBOX_0_1;
> +	size_t i;
> +
> +	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
> +		*mbx_data = nicvf_reg_read(nic, mbx_addr);
> +		mbx_data++;
> +		mbx_addr += sizeof(uint64_t);
> +	}
> +
> +	/* Overwrite the message so we won't receive it again */
> +	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, 0x0);
> +
> +	nicvf_mbox_log("msg received id=0x%hhx %s (VF%d)", mbx.msg.msg,
> +			nicvf_mbox_msg_str(mbx.msg.msg), nic->vf_id);
> +
> +	switch (mbx.msg.msg) {
> +	case NIC_MBOX_MSG_READY:
> +		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
> +		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
> +		nic->node = mbx.nic_cfg.node_id;
> +		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
> +		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
> +		ether_addr_copy((struct ether_addr *)mbx.nic_cfg.mac_addr,
> +				(struct ether_addr *)nic->mac_addr);
> +		nic->pf_acked = true;
> +		break;
> +	case NIC_MBOX_MSG_ACK:
> +		nic->pf_acked = true;
> +		break;
> +	case NIC_MBOX_MSG_NACK:
> +		nic->pf_nacked = true;
> +		break;
> +	case NIC_MBOX_MSG_RSS_SIZE:
> +		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
> +		nic->pf_acked = true;
> +		break;
> +	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
> +		nic->link_up = mbx.link_status.link_up;
> +		nic->duplex = mbx.link_status.duplex;
> +		nic->speed = mbx.link_status.speed;
> +		nic->pf_acked = true;
> +		break;
> +	default:
> +		nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s",
> +				mbx.msg.msg, nicvf_mbox_msg_str(mbx.msg.msg));
> +		break;
> +	}
> +	nicvf_smp_wmb();
> +
> +	return mbx.msg.msg;
> +}
> +
> +/*
> + * Checks if VF is able to communicate with PF
> + * and also gets the VNIC number this VF is associated with.
> + */
> +int
> +nicvf_mbox_check_pf_ready(struct nicvf *nic)
> +{
> +	struct nic_mbx mbx = { .msg = {.msg = NIC_MBOX_MSG_READY} };
> +
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
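For context, hypothetical init-time usage of this handshake (not part of
the patch):

	if (nicvf_mbox_check_pf_ready(nic) != 0)
		return -EBUSY;	/* PF not responding */
	/* On success, the NIC_MBOX_MSG_READY reply handled in
	 * nicvf_handle_mbx_intr() has filled in nic->vf_id, nic->node
	 * and nic->mac_addr.
	 */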
> +
> +int
> +nicvf_mbox_set_mac_addr(struct nicvf *nic,
> +			const uint8_t mac[NICVF_MAC_ADDR_SIZE])
> +{
> +	struct nic_mbx mbx = { .msg = {0} };
> +	int i;
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_SET_MAC;
> +	mbx.mac.vf_id = nic->vf_id;
> +	for (i = 0; i < 6; i++)
> +		mbx.mac.mac_addr[i] = mac[i];
> +
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_CPI_CFG;
> +	mbx.cpi_cfg.vf_id = nic->vf_id;
> +	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
> +	mbx.cpi_cfg.rq_cnt = qcnt;
> +
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_get_rss_size(struct nicvf *nic)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_RSS_SIZE;
> +	mbx.rss_size.vf_id = nic->vf_id;
> +
> +	/* Result will be stored in nic->rss_info.rss_size */
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_config_rss(struct nicvf *nic)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +	struct nicvf_rss_reta_info *rss = &nic->rss_info;
> +	size_t tot_len = rss->rss_size;
> +	size_t cur_len;
> +	size_t cur_idx = 0;
> +	size_t i;
> +
> +	mbx.rss_cfg.vf_id = nic->vf_id;
> +	mbx.rss_cfg.hash_bits = rss->hash_bits;
> +	mbx.rss_cfg.tbl_len = 0;
> +	mbx.rss_cfg.tbl_offset = 0;
> +
> +	while (cur_idx < tot_len) {
> +		cur_len = nicvf_min(tot_len - cur_idx,
> +				(size_t)RSS_IND_TBL_LEN_PER_MBX_MSG);
> +		mbx.msg.msg = (cur_idx > 0) ?
> +			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
> +		mbx.rss_cfg.tbl_offset = cur_idx;
> +		mbx.rss_cfg.tbl_len = cur_len;
> +		for (i = 0; i < cur_len; i++)
> +			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[cur_idx++];
> +
> +		if (nicvf_mbox_send_msg_to_pf(nic, &mbx))
> +			return NICVF_ERR_RSS_TBL_UPDATE;
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
> +		     struct pf_rq_cfg *pf_rq_cfg)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_RQ_CFG;
> +	mbx.rq.qs_num = nic->vf_id;
> +	mbx.rq.rq_num = qidx;
> +	mbx.rq.cfg = pf_rq_cfg->value;
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_SQ_CFG;
> +	mbx.sq.qs_num = nic->vf_id;
> +	mbx.sq.sq_num = qidx;
> +	mbx.sq.sqs_mode = nic->sqs_mode;
> +	mbx.sq.cfg = (nic->vf_id << 3) | qidx;
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	qs_cfg->be = 1;
> +#endif
> +	/* Send a mailbox msg to PF to config Qset */
> +	mbx.msg.msg = NIC_MBOX_MSG_QS_CFG;
> +	mbx.qs.num = nic->vf_id;
> +	mbx.qs.cfg = qs_cfg->value;
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +	struct pf_rq_drop_cfg *drop_cfg;
> +
> +	/* Enable CQ drop to reserve sufficient CQEs for all tx packets */
> +	mbx.msg.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
> +	mbx.rq.qs_num = nic->vf_id;
> +	mbx.rq.rq_num = qidx;
> +	drop_cfg = (struct pf_rq_drop_cfg *)&mbx.rq.cfg;
> +	drop_cfg->value = 0;
> +	if (enable) {
> +		drop_cfg->cq_red = 1;
> +		drop_cfg->cq_drop = 2;
> +	}
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_SET_MAX_FRS;
> +	mbx.frs.max_frs = mtu;
> +	mbx.frs.vf_id = nic->vf_id;
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_rq_sync(struct nicvf *nic)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	/* Make sure all packets in the pipeline are written back into mem */
> +	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
> +	mbx.rq.cfg = 0;
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_RQ_BP_CFG;
> +	mbx.rq.qs_num = nic->vf_id;
> +	mbx.rq.rq_num = qidx;
> +	mbx.rq.cfg = 0;
> +	if (enable)
> +		mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (nic->vf_id << 0);
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_loopback_config(struct nicvf *nic, bool enable)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
> +	mbx.lbk.vf_id = nic->vf_id;
> +	mbx.lbk.enable = enable;
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +int
> +nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
> +			       uint8_t tx_stat_mask, uint16_t rq_stat_mask,
> +			       uint16_t sq_stat_mask)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
> +	mbx.reset_stat.rx_stat_mask = rx_stat_mask;
> +	mbx.reset_stat.tx_stat_mask = tx_stat_mask;
> +	mbx.reset_stat.rq_stat_mask = rq_stat_mask;
> +	mbx.reset_stat.sq_stat_mask = sq_stat_mask;
> +	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +void
> +nicvf_mbox_shutdown(struct nicvf *nic)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
> +	nicvf_mbox_send_msg_to_pf(nic, &mbx);
> +}
> +
> +void
> +nicvf_mbox_cfg_done(struct nicvf *nic)
> +{
> +	struct nic_mbx mbx = { .msg = { 0 } };
> +
> +	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
> +	nicvf_mbox_send_async_msg_to_pf(nic, &mbx);
> +}
> diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
> new file mode 100644
> index 0000000..7c0c6a9
> --- /dev/null
> +++ b/drivers/net/thunderx/base/nicvf_mbox.h
> @@ -0,0 +1,232 @@
> +/*
> + *   BSD LICENSE
> + *
> + *   Copyright (C) Cavium networks Ltd. 2016.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Cavium networks nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#ifndef __THUNDERX_NICVF_MBOX__
> +#define __THUNDERX_NICVF_MBOX__
> +
> +#include <stdint.h>
> +
> +#include "nicvf_plat.h"
> +
> +/* PF <--> VF Mailbox communication
> + * Two 64-bit registers are shared between the PF and each VF.
> + * Writing into the second register marks the end of the message.
> + */
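In other words, a send boils down to the following (a sketch; this is what
nicvf_mbox_send_msg_to_pf_raw() in nicvf_mbox.c above does for the 16-byte
message):

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, word0);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, word1); /* ends msg */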
> +
> +/* PF <--> VF mailbox communication */
> +#define	NIC_PF_VF_MAILBOX_SIZE		2
> +#define	NIC_MBOX_MSG_TIMEOUT		2000	/* ms */
> +
> +/* Mailbox message types */
> +#define	NIC_MBOX_MSG_INVALID		0x00	/* Invalid message */
> +#define	NIC_MBOX_MSG_READY		0x01	/* Is PF ready to rcv msgs */
> +#define	NIC_MBOX_MSG_ACK		0x02	/* ACK the message received */
> +#define	NIC_MBOX_MSG_NACK		0x03	/* NACK the message received */
> +#define	NIC_MBOX_MSG_QS_CFG		0x04	/* Configure Qset */
> +#define	NIC_MBOX_MSG_RQ_CFG		0x05	/* Configure receive queue */
> +#define	NIC_MBOX_MSG_SQ_CFG		0x06	/* Configure Send queue */
> +#define	NIC_MBOX_MSG_RQ_DROP_CFG	0x07	/* Configure receive queue */
> +#define	NIC_MBOX_MSG_SET_MAC		0x08	/* Add MAC ID to DMAC filter */
> +#define	NIC_MBOX_MSG_SET_MAX_FRS	0x09	/* Set max frame size */
> +#define	NIC_MBOX_MSG_CPI_CFG		0x0A	/* Config CPI, RSSI */
> +#define	NIC_MBOX_MSG_RSS_SIZE		0x0B	/* Get RSS indir_tbl size */
> +#define	NIC_MBOX_MSG_RSS_CFG		0x0C	/* Config RSS table */
> +#define	NIC_MBOX_MSG_RSS_CFG_CONT	0x0D	/* RSS config continuation */
> +#define	NIC_MBOX_MSG_RQ_BP_CFG		0x0E	/* RQ backpressure config */
> +#define	NIC_MBOX_MSG_RQ_SW_SYNC		0x0F	/* Flush inflight pkts to RQ */
> +#define	NIC_MBOX_MSG_BGX_LINK_CHANGE	0x11	/* BGX:LMAC link status */
> +#define	NIC_MBOX_MSG_ALLOC_SQS		0x12	/* Allocate secondary Qset */
> +#define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
> +#define	NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17	/* Reset statistics counters */
> +#define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
> +#define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
> +#define	NIC_MBOX_MSG_MAX		0x100	/* Maximum number of messages */
> +
> +/* Get vNIC VF configuration */
> +struct nic_cfg_msg {
> +	uint8_t    msg;
> +	uint8_t    vf_id;
> +	uint8_t    node_id;
> +	bool	   tns_mode:1;
> +	bool	   sqs_mode:1;
> +	bool	   loopback_supported:1;
> +	uint8_t    mac_addr[NICVF_MAC_ADDR_SIZE];
> +};
> +
> +/* Qset configuration */
> +struct qs_cfg_msg {
> +	uint8_t    msg;
> +	uint8_t    num;
> +	uint8_t    sqs_count;
> +	uint64_t   cfg;
> +};
> +
> +/* Receive queue configuration */
> +struct rq_cfg_msg {
> +	uint8_t    msg;
> +	uint8_t    qs_num;
> +	uint8_t    rq_num;
> +	uint64_t   cfg;
> +};
> +
> +/* Send queue configuration */
> +struct sq_cfg_msg {
> +	uint8_t    msg;
> +	uint8_t    qs_num;
> +	uint8_t    sq_num;
> +	bool       sqs_mode;
> +	uint64_t   cfg;
> +};
> +
> +/* Set VF's MAC address */
> +struct set_mac_msg {
> +	uint8_t    msg;
> +	uint8_t    vf_id;
> +	uint8_t    mac_addr[NICVF_MAC_ADDR_SIZE];
> +};
> +
> +/* Set Maximum frame size */
> +struct set_frs_msg {
> +	uint8_t    msg;
> +	uint8_t    vf_id;
> +	uint16_t   max_frs;
> +};
> +
> +/* Set CPI algorithm type */
> +struct cpi_cfg_msg {
> +	uint8_t    msg;
> +	uint8_t    vf_id;
> +	uint8_t    rq_cnt;
> +	uint8_t    cpi_alg;
> +};
> +
> +/* Get RSS table size */
> +struct rss_sz_msg {
> +	uint8_t    msg;
> +	uint8_t    vf_id;
> +	uint16_t   ind_tbl_size;
> +};
> +
> +/* Set RSS configuration */
> +struct rss_cfg_msg {
> +	uint8_t    msg;
> +	uint8_t    vf_id;
> +	uint8_t    hash_bits;
> +	uint8_t    tbl_len;
> +	uint8_t    tbl_offset;
> +#define RSS_IND_TBL_LEN_PER_MBX_MSG	8
> +	uint8_t    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
> +};
> +
> +/* Physical interface link status */
> +struct bgx_link_status {
> +	uint8_t    msg;
> +	uint8_t    link_up;
> +	uint8_t    duplex;
> +	uint32_t   speed;
> +};
> +
> +/* Set interface in loopback mode */
> +struct set_loopback {
> +	uint8_t    msg;
> +	uint8_t    vf_id;
> +	bool	   enable;
> +};
> +
> +/* Reset statistics counters */
> +struct reset_stat_cfg {
> +	uint8_t    msg;
> +	/* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
> +	uint16_t   rx_stat_mask;
> +	/* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
> +	uint8_t    tx_stat_mask;
> +	/* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
> +	 * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
> +	 * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
> +	 * ..
> +	 * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
> +	 * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
> +	 */
> +	uint16_t   rq_stat_mask;
> +	/* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
> +	 * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
> +	 * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
> +	 * ..
> +	 * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
> +	 * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
> +	 */
> +	uint16_t   sq_stat_mask;
> +};
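To make the bitmap layout concrete, a hypothetical call clearing only the
RX octet and unicast counters plus RQ0's two stat registers (mask values
built from the nic_stat_vnic_rx_e enum and the bit layout described above):

	nicvf_mbox_reset_stat_counters(nic,
		(1 << RX_OCTS) | (1 << RX_UCAST),	/* rx_stat_mask */
		0,					/* tx_stat_mask */
		0x3,			/* bits 0,1: RQ0_STAT(0..1) */
		0);					/* sq_stat_mask */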
> +
> +struct nic_mbx {
> +/* 128 bit shared memory between PF and each VF */
> +union {
> +	struct { uint8_t msg; }	msg;
> +	struct nic_cfg_msg	nic_cfg;
> +	struct qs_cfg_msg	qs;
> +	struct rq_cfg_msg	rq;
> +	struct sq_cfg_msg	sq;
> +	struct set_mac_msg	mac;
> +	struct set_frs_msg	frs;
> +	struct cpi_cfg_msg	cpi_cfg;
> +	struct rss_sz_msg	rss_size;
> +	struct rss_cfg_msg	rss_cfg;
> +	struct bgx_link_status  link_status;
> +	struct set_loopback	lbk;
> +	struct reset_stat_cfg	reset_stat;
> +};
> +};
> +
> +NICVF_STATIC_ASSERT(sizeof(struct nic_mbx) <= 16);
> +
> +int nicvf_handle_mbx_intr(struct nicvf *nic);
> +int nicvf_mbox_check_pf_ready(struct nicvf *nic);
> +int nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg);
> +int nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
> +			 struct pf_rq_cfg *pf_rq_cfg);
> +int nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx);
> +int nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable);
> +int nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable);
> +int nicvf_mbox_set_mac_addr(struct nicvf *nic,
> +			    const uint8_t mac[NICVF_MAC_ADDR_SIZE]);
> +int nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt);
> +int nicvf_mbox_get_rss_size(struct nicvf *nic);
> +int nicvf_mbox_config_rss(struct nicvf *nic);
> +int nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu);
> +int nicvf_mbox_rq_sync(struct nicvf *nic);
> +int nicvf_mbox_loopback_config(struct nicvf *nic, bool enable);
> +int nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
> +	uint8_t tx_stat_mask, uint16_t rq_stat_mask, uint16_t sq_stat_mask);
> +void nicvf_mbox_shutdown(struct nicvf *nic);
> +void nicvf_mbox_cfg_done(struct nicvf *nic);
> +
> +#endif /* __THUNDERX_NICVF_MBOX__ */
> diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
> new file mode 100644
> index 0000000..83c1844
> --- /dev/null
> +++ b/drivers/net/thunderx/base/nicvf_plat.h
> @@ -0,0 +1,132 @@
> +/*
> + *   BSD LICENSE
> + *
> + *   Copyright (C) Cavium networks Ltd. 2016.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Cavium networks nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#ifndef _THUNDERX_NICVF_H
> +#define _THUNDERX_NICVF_H
> +
> +/* Platform/OS/arch specific abstractions */
> +
> +/* log */
> +#include <rte_log.h>
> +#include "../nicvf_logs.h"
> +
> +#define nicvf_log_error(s, ...) PMD_DRV_LOG(ERR, s, ##__VA_ARGS__)
> +
> +#define nicvf_log_debug(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__)
> +
> +#define nicvf_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__)
> +
> +#define nicvf_log(s, ...) fprintf(stderr, s, ##__VA_ARGS__)
Why use fprintf to stderr here instead of RTE_LOG?
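A sketch of the RTE_LOG-based alternative being suggested, using the
generic PMD logtype:

	#define nicvf_log(s, ...) RTE_LOG(INFO, PMD, s, ##__VA_ARGS__)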

> +
> +/* delay */
> +#include <rte_cycles.h>
> +#define nicvf_delay_us(x) rte_delay_us(x)
> +
> +/* barrier */
> +#include <rte_atomic.h>
> +#define nicvf_smp_wmb() rte_smp_wmb()
> +#define nicvf_smp_rmb() rte_smp_rmb()
> +
> +/* utils */
> +#include <rte_common.h>
> +#define nicvf_min(x, y) RTE_MIN(x, y)
> +
> +/* byte order */
> +#include <rte_byteorder.h>
> +#define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x)
> +#define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x)
> +
> +/* Constants */
> +#include <rte_ether.h>
> +#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN
> +
> +/* ARM64 specific functions */
> +#if defined(RTE_ARCH_ARM64)
> +#define nicvf_prefetch_store_keep(_ptr) ({\
> +	asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
> +
> +static inline void __attribute__((always_inline))
> +nicvf_addr_write(uintptr_t addr, uint64_t val)
> +{
> +	asm volatile(
> +		    "str %x[val], [%x[addr]]"
> +		    :
> +		    : [val] "r" (val), [addr] "r" (addr));
> +}
> +
> +static inline uint64_t __attribute__((always_inline))
> +nicvf_addr_read(uintptr_t addr)
> +{
> +	uint64_t val;
> +
> +	asm volatile(
> +		    "ldr %x[val], [%x[addr]]"
> +		    : [val] "=r" (val)
> +		    : [addr] "r" (addr));
> +	return val;
> +}
> +
> +#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({		\
> +			asm volatile(			\
> +			"ldp %x[x1], %x[x0], [%x[p1]]"	\
> +			: [x1]"=r"(reg1), [x0]"=r"(reg2)\
> +			: [p1]"r"(addr)			\
> +			); })
> +
> +#else /* non-optimized functions for building on non-arm64 arch */
> +
> +#define nicvf_prefetch_store_keep(_ptr) do {} while (0)
> +
> +static inline void __attribute__((always_inline))
> +nicvf_addr_write(uintptr_t addr, uint64_t val)
> +{
> +	*(volatile uint64_t *)addr = val;
> +}
> +
> +static inline uint64_t __attribute__((always_inline))
> +nicvf_addr_read(uintptr_t addr)
> +{
> +	return	*(volatile uint64_t *)addr;
> +}
> +
> +#define NICVF_LOAD_PAIR(reg1, reg2, addr)		\
> +do {							\
> +	reg1 = nicvf_addr_read((uintptr_t)addr);	\
> +	reg2 = nicvf_addr_read((uintptr_t)addr + 8);	\
> +} while (0)
> +
> +#endif
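For reference, a hypothetical use of the pair load; both the LDP and the
generic variants fill the two registers the same way:

	uint64_t w0, w1;
	NICVF_LOAD_PAIR(w0, w1, addr);	/* w0 = addr[0..7], w1 = addr[8..15] */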
> +
> +#include "nicvf_hw.h"
> +#include "nicvf_mbox.h"
> +
> +#endif /* _THUNDERX_NICVF_H */
>
  
Jerin Jacob June 13, 2016, 1:55 p.m. UTC | #3
This patch set provides the initial version of DPDK PMD for the
built-in NIC device in Cavium ThunderX SoC family.

Implemented features and ThunderX nicvf PMD documentation added
in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
respectively in this patch set.

These patches are checked using checkpatch.sh with following
additional ignore option:
    options="$options --ignore=CAMELCASE,BRACKET_SPACE"
CAMELCASE - To accommodate PRIx64
BRACKET_SPACE - To accommodate AT&T inline assembly in two places

This patch set is based on DPDK 16.07-RC1
and tested with git HEAD change-set
ca173a909538a2f1082cd0dcb4d778a97dab69c3 along with
following depended patch

http://dpdk.org/dev/patchwork/patch/11826/
ethdev: add tunnel and port RSS offload types

V1->V2

http://dpdk.org/dev/patchwork/patch/12609/
-- added const for the const struct tables
-- remove multiple blank lines
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12610/
-- removed DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_net lib/librte_malloc
-- add const for table structs
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12614/
-- s/DEFAULT_*/NICVF_DEFAULT_*/gc
http://dpdk.org/dev/patchwork/patch/12615/
-- Fix typos
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12616/
-- removed redundant txq->tail = 0 and txq->head = 0
http://dpdk.org/dev/patchwork/patch/12627/
-- fixed the documentation changes

-- fixed TAB+space occurrences in functions
-- rebased to c8c33ad7f94c59d1c0676af0cfd61207b3e808db

V2->V3

http://dpdk.org/dev/patchwork/patch/13060/
-- Changed polling infrastructure to use rte_eal_alarm* instead of timerfd_create API
-- rebased to ca173a909538a2f1082cd0dcb4d778a97dab69c3

V3->V4

Addressed review comments from Ferruh's review

http://dpdk.org/dev/patchwork/patch/13314/
-- s/avilable/available
http://dpdk.org/dev/patchwork/patch/13323/
-- s/witout/without

http://dpdk.org/dev/patchwork/patch/13318/
-- s/nicvf_free_xmittted_buffers/nicvf_free_xmitted_buffers
-- fix checkpatch errors
http://dpdk.org/dev/patchwork/patch/13307/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13308/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13320/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13321/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13322/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13324/
-- addressed review comments and created separated patch for
platform specific config change

-- update change log to net/thunderx: ........

Jerin Jacob (19):
  net/thunderx/base: add hardware API for ThunderX nicvf inbuilt NIC
  net/thunderx: add pmd skeleton
  net/thunderx: add link status and link update support
  net/thunderx: add get_reg and get_reg_length support
  net/thunderx: add dev_configure support
  net/thunderx: add dev_infos_get support
  net/thunderx: add rx_queue_setup/release support
  net/thunderx: add tx_queue_setup/release support
  net/thunderx: add rss and reta query and update support
  net/thunderx: add mtu_set and promiscuous_enable support
  net/thunderx: add stats support
  net/thunderx: add single and multi segment tx functions
  net/thunderx: add single and multi segment rx functions
  net/thunderx: add dev_supported_ptypes_get and rx_queue_count support
  net/thunderx: add rx queue start and stop support
  net/thunderx: add tx queue start and stop support
  net/thunderx: add device start, stop and close support
  net/thunderx: updated driver documentation and release notes
  maintainers: claim responsibility for the ThunderX nicvf PMD

 MAINTAINERS                                        |    6 +
 config/common_base                                 |   10 +
 config/defconfig_arm64-thunderx-linuxapp-gcc       |   10 +
 doc/guides/nics/index.rst                          |    1 +
 doc/guides/nics/overview.rst                       |   96 +-
 doc/guides/nics/thunderx.rst                       |  354 ++++
 doc/guides/rel_notes/release_16_07.rst             |    1 +
 drivers/net/Makefile                               |    1 +
 drivers/net/thunderx/Makefile                      |   65 +
 drivers/net/thunderx/base/nicvf_hw.c               |  905 ++++++++++
 drivers/net/thunderx/base/nicvf_hw.h               |  240 +++
 drivers/net/thunderx/base/nicvf_hw_defs.h          | 1219 +++++++++++++
 drivers/net/thunderx/base/nicvf_mbox.c             |  418 +++++
 drivers/net/thunderx/base/nicvf_mbox.h             |  232 +++
 drivers/net/thunderx/base/nicvf_plat.h             |  132 ++
 drivers/net/thunderx/nicvf_ethdev.c                | 1789 ++++++++++++++++++++
 drivers/net/thunderx/nicvf_ethdev.h                |  106 ++
 drivers/net/thunderx/nicvf_logs.h                  |   83 +
 drivers/net/thunderx/nicvf_rxtx.c                  |  599 +++++++
 drivers/net/thunderx/nicvf_rxtx.h                  |  101 ++
 drivers/net/thunderx/nicvf_struct.h                |  124 ++
 .../thunderx/rte_pmd_thunderx_nicvf_version.map    |    4 +
 mk/rte.app.mk                                      |    2 +
 23 files changed, 6450 insertions(+), 48 deletions(-)
 create mode 100644 doc/guides/nics/thunderx.rst
 create mode 100644 drivers/net/thunderx/Makefile
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.c
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.h
 create mode 100644 drivers/net/thunderx/base/nicvf_hw_defs.h
 create mode 100644 drivers/net/thunderx/base/nicvf_mbox.c
 create mode 100644 drivers/net/thunderx/base/nicvf_mbox.h
 create mode 100644 drivers/net/thunderx/base/nicvf_plat.h
 create mode 100644 drivers/net/thunderx/nicvf_ethdev.c
 create mode 100644 drivers/net/thunderx/nicvf_ethdev.h
 create mode 100644 drivers/net/thunderx/nicvf_logs.h
 create mode 100644 drivers/net/thunderx/nicvf_rxtx.c
 create mode 100644 drivers/net/thunderx/nicvf_rxtx.h
 create mode 100644 drivers/net/thunderx/nicvf_struct.h
 create mode 100644 drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map
  
Bruce Richardson June 13, 2016, 3:46 p.m. UTC | #4
On Mon, Jun 13, 2016 at 07:25:24PM +0530, Jerin Jacob wrote:
> This patch set provides the initial version of DPDK PMD for the
> built-in NIC device in Cavium ThunderX SoC family.
> 
> Implemented features and ThunderX nicvf PMD documentation added
> in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
> respectively in this patch set.
> 
> These patches are checked using checkpatch.sh with following
> additional ignore option:
>     options="$options --ignore=CAMELCASE,BRACKET_SPACE"
> CAMELCASE - To accommodate PRIx64
> BRACKET_SPACE - To accommodate AT&T inline assembly in two places
> 
Hi Jerin,

other than the fact that patch 1 is very big, this set looks pretty ok to me.
However, as a general comment on the series: the commit titles are overly
low-level, as they refer too much to function/structure names e.g. patches 4
through 10. If you run the script "check-git-log.sh" on your patchset this will
be flagged. What is expected in commit titles is that the change introduced by
the patch is explained without directly using the function names.

Regards,
/Bruce
  
Jerin Jacob June 14, 2016, 7:06 p.m. UTC | #5
This patch set provides the initial version of DPDK PMD for the
built-in NIC device in Cavium ThunderX SoC family.

Implemented features and ThunderX nicvf PMD documentation added
in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
respectively in this patch set.

These patches are checked using checkpatch.sh with following
additional ignore option:
    options="$options --ignore=CAMELCASE,BRACKET_SPACE"
CAMELCASE - To accommodate PRIx64
BRACKET_SPACE - To accommodate AT&T inline assembly in two places

This patch set is based on DPDK 16.07-RC1
and tested with git HEAD change-set
ca173a909538a2f1082cd0dcb4d778a97dab69c3 along with
following depended patch

http://dpdk.org/dev/patchwork/patch/11826/
ethdev: add tunnel and port RSS offload types

V1->V2

http://dpdk.org/dev/patchwork/patch/12609/
-- added const for the const struct tables
-- remove multiple blank lines
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12610/
-- removed DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_net lib/librte_malloc
-- add const for table structs
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12614/
-- s/DEFAULT_*/NICVF_DEFAULT_*/gc
http://dpdk.org/dev/patchwork/patch/12615/
-- Fix typos
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12616/
-- removed redundant txq->tail = 0 and txq->head = 0
http://dpdk.org/dev/patchwork/patch/12627/
-- fixed the documentation changes

-- fixed TAB+space occurrences in functions
-- rebased to c8c33ad7f94c59d1c0676af0cfd61207b3e808db

V2->V3

http://dpdk.org/dev/patchwork/patch/13060/
-- Changed polling infrastructure to use rte_eal_alarm* instead of timerfd_create API
-- rebased to ca173a909538a2f1082cd0dcb4d778a97dab69c3

V3->V4

Addressed review comments from Ferruh's review

http://dpdk.org/dev/patchwork/patch/13314/
-- s/avilable/available
http://dpdk.org/dev/patchwork/patch/13323/
-- s/witout/without

http://dpdk.org/dev/patchwork/patch/13318/
-- s/nicvf_free_xmittted_buffers/nicvf_free_xmitted_buffers
-- fix checkpatch errors
http://dpdk.org/dev/patchwork/patch/13307/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13308/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13320/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13321/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13322/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13324/
-- addressed review comments and created separated patch for
platform specific config change

-- update change log to net/thunderx: ........

V4->V5
-- splitting up the drivers/net/thunderx/nicvf/base files into the following
patches, as suggested by Bruce

net/thunderx/base: add HW constants for ThunderX inbuilt NIC
net/thunderx/base: add register definition for ThunderX inbuilt NIC
net/thunderx/base: implement DPDK based platform abstraction for base code
net/thunderx/base: add mbox API for ThunderX PF/VF driver communication
net/thunderx/base: add hardware API for ThunderX nicvf inbuilt NIC
net/thunderx/base: add RSS and reta configuration HW APIs
net/thunderx/base: add statistics get HW APIs

-- Corrected wrong git commit log messages flagged by check-git-log.sh

Jerin Jacob (25):
  net/thunderx/base: add HW constants for ThunderX inbuilt NIC
  net/thunderx/base: add register definition for ThunderX inbuilt NIC
  net/thunderx/base: implement DPDK based platform abstraction for base
    code
  net/thunderx/base: add mbox API for ThunderX PF/VF driver
    communication
  net/thunderx/base: add hardware API for ThunderX nicvf inbuilt NIC
  net/thunderx/base: add RSS and reta configuration HW APIs
  net/thunderx/base: add statistics get HW APIs
  net/thunderx: add pmd skeleton
  net/thunderx: add link status and link update support
  net/thunderx: add registers dump support
  net/thunderx: add ethdev configure support
  net/thunderx: add get device info support
  net/thunderx: add Rx queue setup and release support
  net/thunderx: add Tx queue setup and release support
  net/thunderx: add RSS and reta query and update support
  net/thunderx: add MTU set and promiscuous enable support
  net/thunderx: add stats support
  net/thunderx: add single and multi segment Tx functions
  net/thunderx: add single and multi segment Rx functions
  net/thunderx: implement supported ptype get and Rx queue count
  net/thunderx: add Rx queue start and stop support
  net/thunderx: add Tx queue start and stop support
  net/thunderx: add device start,stop and close support
  net/thunderx: updated driver documentation and release notes
  maintainers: claim responsibility for the ThunderX nicvf PMD

 MAINTAINERS                                        |    6 +
 config/common_base                                 |   10 +
 config/defconfig_arm64-thunderx-linuxapp-gcc       |   10 +
 doc/guides/nics/index.rst                          |    1 +
 doc/guides/nics/overview.rst                       |   96 +-
 doc/guides/nics/thunderx.rst                       |  354 ++++
 doc/guides/rel_notes/release_16_07.rst             |    1 +
 drivers/net/Makefile                               |    1 +
 drivers/net/thunderx/Makefile                      |   65 +
 drivers/net/thunderx/base/nicvf_hw.c               |  905 ++++++++++
 drivers/net/thunderx/base/nicvf_hw.h               |  240 +++
 drivers/net/thunderx/base/nicvf_hw_defs.h          | 1219 +++++++++++++
 drivers/net/thunderx/base/nicvf_mbox.c             |  418 +++++
 drivers/net/thunderx/base/nicvf_mbox.h             |  232 +++
 drivers/net/thunderx/base/nicvf_plat.h             |  132 ++
 drivers/net/thunderx/nicvf_ethdev.c                | 1789 ++++++++++++++++++++
 drivers/net/thunderx/nicvf_ethdev.h                |  106 ++
 drivers/net/thunderx/nicvf_logs.h                  |   83 +
 drivers/net/thunderx/nicvf_rxtx.c                  |  599 +++++++
 drivers/net/thunderx/nicvf_rxtx.h                  |  101 ++
 drivers/net/thunderx/nicvf_struct.h                |  124 ++
 .../thunderx/rte_pmd_thunderx_nicvf_version.map    |    4 +
 mk/rte.app.mk                                      |    2 +
 23 files changed, 6450 insertions(+), 48 deletions(-)
 create mode 100644 doc/guides/nics/thunderx.rst
 create mode 100644 drivers/net/thunderx/Makefile
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.c
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.h
 create mode 100644 drivers/net/thunderx/base/nicvf_hw_defs.h
 create mode 100644 drivers/net/thunderx/base/nicvf_mbox.c
 create mode 100644 drivers/net/thunderx/base/nicvf_mbox.h
 create mode 100644 drivers/net/thunderx/base/nicvf_plat.h
 create mode 100644 drivers/net/thunderx/nicvf_ethdev.c
 create mode 100644 drivers/net/thunderx/nicvf_ethdev.h
 create mode 100644 drivers/net/thunderx/nicvf_logs.h
 create mode 100644 drivers/net/thunderx/nicvf_rxtx.c
 create mode 100644 drivers/net/thunderx/nicvf_rxtx.h
 create mode 100644 drivers/net/thunderx/nicvf_struct.h
 create mode 100644 drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map
  
Bruce Richardson June 15, 2016, 2:39 p.m. UTC | #6
On Wed, Jun 15, 2016 at 12:36:15AM +0530, Jerin Jacob wrote:
> This patch set provides the initial version of DPDK PMD for the
> built-in NIC device in Cavium ThunderX SoC family.
> 
> Implemented features and ThunderX nicvf PMD documentation added
> in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
> respectively in this patch set.
> 
> These patches are checked using checkpatch.sh with following
> additional ignore option:
>     options="$options --ignore=CAMELCASE,BRACKET_SPACE"
> CAMELCASE - To accommodate PRIx64
> BRACKET_SPACE - To accommodate AT&T inline assembly in two places
> 
> This patch set is based on DPDK 16.07-RC1
> and tested with git HEAD change-set
> ca173a909538a2f1082cd0dcb4d778a97dab69c3 along with
> following depended patch
> 
> http://dpdk.org/dev/patchwork/patch/11826/
> ethdev: add tunnel and port RSS offload types
> 
Hi Jerin,

hopefully a final set of comments before merge on this set, as it's looking
very good now.

* Two patches look like they need to be split, as they are combining multiple
  functions into one patch. They are:
    [dpdk-dev,v5,16/25] net/thunderx: add MTU set and promiscuous enable support
    [dpdk-dev,v5,20/25] net/thunderx: implement supported ptype get and Rx queue count
  For the other patches which add multiple functions, the functions seem to be
  logically related so I don't think there is a problem

* check-git-logs.sh is warning about a few of the commit messages being too long.
  Splitting patch 20 should fix one of those, but there are a few remaining.
  A number of titles refer to ThunderX in the message, but this is probably
  unnecessary, as the prefix already contains "net/thunderx" in it.

Regards,
/Bruce

PS: Please also baseline patches on dpdk-next-net/rel_16_07 tree. They currently
apply fine to that tree so there is no problem, but just in case later commits
break things, that is the tree that net patches should be based on.
  
Jerin Jacob June 16, 2016, 9:31 a.m. UTC | #7
On Wed, Jun 15, 2016 at 03:39:25PM +0100, Bruce Richardson wrote:
> On Wed, Jun 15, 2016 at 12:36:15AM +0530, Jerin Jacob wrote:
> > This patch set provides the initial version of DPDK PMD for the
> > built-in NIC device in Cavium ThunderX SoC family.
> > 
> > Implemented features and ThunderX nicvf PMD documentation added
> > in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
> > respectively in this patch set.
> > 
> > These patches are checked using checkpatch.sh with following
> > additional ignore option:
> >     options="$options --ignore=CAMELCASE,BRACKET_SPACE"
> > CAMELCASE - To accommodate PRIx64
> > BRACKET_SPACE - To accommodate AT&T inline assembly in two places
> > 
> > This patch set is based on DPDK 16.07-RC1
> > and tested with git HEAD change-set
> > ca173a909538a2f1082cd0dcb4d778a97dab69c3 along with
> > following depended patch
> > 
> > http://dpdk.org/dev/patchwork/patch/11826/
> > ethdev: add tunnel and port RSS offload types
> > 
> Hi Jerin,
> 
> hopefully a final set of comments before merge on this set, as it's looking
> very good now.
> 
> * Two patches look like they need to be split, as they are combining multiple
>   functions into one patch. They are:
>     [dpdk-dev,v5,16/25] net/thunderx: add MTU set and promiscuous enable support
>     [dpdk-dev,v5,20/25] net/thunderx: implement supported ptype get and Rx queue count
>   For the other patches which add multiple functions, the functions seem to be
>   logically related so I don't think there is a problem
> 
> * check-git-logs.sh is warning about a few of the commit messages being too long.
>   Splitting patch 20 should fix one of those, but there are a few remaining.
>   A number of titles refer to ThunderX in the message, but this is probably
>   unnecessary, as the prefix already contains "net/thunderx" in it.

OK. I will send the next revision.

> 
> Regards,
> /Bruce
> 
> PS: Please also baseline patches on dpdk-next-net/rel_16_07 tree. They currently
> apply fine to that tree so there is no problem, but just in case later commits
> break things, that is the tree that net patches should be based on.
  
Bruce Richardson June 16, 2016, 10:58 a.m. UTC | #8
On Thu, Jun 16, 2016 at 03:01:02PM +0530, Jerin Jacob wrote:
> On Wed, Jun 15, 2016 at 03:39:25PM +0100, Bruce Richardson wrote:
> > On Wed, Jun 15, 2016 at 12:36:15AM +0530, Jerin Jacob wrote:
> > > This patch set provides the initial version of DPDK PMD for the
> > > built-in NIC device in Cavium ThunderX SoC family.
> > > 
> > > Implemented features and ThunderX nicvf PMD documentation added
> > > in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
> > > respectively in this patch set.
> > > 
> > > These patches are checked using checkpatch.sh with following
> > > additional ignore option:
> > >     options="$options --ignore=CAMELCASE,BRACKET_SPACE"
> > > CAMELCASE - To accommodate PRIx64
> > > BRACKET_SPACE - To accommodate AT&T inline assembly in two places
> > > 
> > > This patch set is based on DPDK 16.07-RC1
> > > and tested with git HEAD change-set
> > > ca173a909538a2f1082cd0dcb4d778a97dab69c3 along with
> > > following depended patch
> > > 
> > > http://dpdk.org/dev/patchwork/patch/11826/
> > > ethdev: add tunnel and port RSS offload types
> > > 
> > Hi Jerin,
> > 
> > hopefully a final set of comments before merge on this set, as it's looking
> > very good now.
> > 
> > * Two patches look like they need to be split, as they are combining multiple
> >   functions into one patch. They are:
> >     [dpdk-dev,v5,16/25] net/thunderx: add MTU set and promiscuous enable support
> >     [dpdk-dev,v5,20/25] net/thunderx: implement supported ptype get and Rx queue count
> >   For the other patches which add multiple functions, the functions seem to be
> >   logically related so I don't think there is a problem
> > 
> > * check-git-logs.sh is warning about a few of the commit messages being too long.
> >   Splitting patch 20 should fix one of those, but there are a few remaining.
> >   A number of titles refer to ThunderX in the message, but this is probably
> >   unnecessary, as the prefix already contains "net/thunderx" in it.
> 
> OK. I will send the next revision.
> 

Please hold off a few hours, as I'm hoping to merge in the bnxt driver this
afternoon. If all goes well, I would appreciate it if you could base your patchset
off the rel_16_07 tree with that set applied - save me having to resolve conflicts
in files like the nic overview doc, which is always a pain to try and edit. :-)

Regards,
/Bruce
  
Jerin Jacob June 16, 2016, 11:17 a.m. UTC | #9
On Thu, Jun 16, 2016 at 11:58:27AM +0100, Bruce Richardson wrote:
> On Thu, Jun 16, 2016 at 03:01:02PM +0530, Jerin Jacob wrote:
> > On Wed, Jun 15, 2016 at 03:39:25PM +0100, Bruce Richardson wrote:
> > > On Wed, Jun 15, 2016 at 12:36:15AM +0530, Jerin Jacob wrote:
> > > > This patch set provides the initial version of DPDK PMD for the
> > > > built-in NIC device in Cavium ThunderX SoC family.
> > > > 
> > > > Implemented features and ThunderX nicvf PMD documentation added
> > > > in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
> > > > respectively in this patch set.
> > > > 
> > > > These patches are checked using checkpatch.sh with following
> > > > additional ignore option:
> > > >     options="$options --ignore=CAMELCASE,BRACKET_SPACE"
> > > > CAMELCASE - To accommodate PRIx64
> > > > BRACKET_SPACE - To accommodate AT&T inline assembly in two places
> > > > 
> > > > This patch set is based on DPDK 16.07-RC1
> > > > and tested with git HEAD change-set
> > > > ca173a909538a2f1082cd0dcb4d778a97dab69c3 along with
> > > > following depended patch
> > > > 
> > > > http://dpdk.org/dev/patchwork/patch/11826/
> > > > ethdev: add tunnel and port RSS offload types
> > > > 
> > > Hi Jerin,
> > > 
> > > hopefully a final set of comments before merge on this set, as it's looking
> > > very good now.
> > > 
> > > * Two patches look like they need to be split, as they are combining multiple
> > >   functions into one patch. They are:
> > >     [dpdk-dev,v5,16/25] net/thunderx: add MTU set and promiscuous enable support
> > >     [dpdk-dev,v5,20/25] net/thunderx: implement supported ptype get and Rx queue count
> > >   For the other patches which add multiple functions, the functions seem to be
> > >   logically related so I don't think there is a problem
> > > 
> > > * check-git-logs.sh is warning about a few of the commit messages being too long.
> > >   Splitting patch 20 should fix one of those, but there are a few remaining.
> > >   A number of titles refer to ThunderX in the message, but this is probably
> > >   unnecessary, as the prefix already contains "net/thunderx" in it.
> > 
> > OK. I will send the next revision.
> > 
> 
> Please hold off a few hours, as I'm hoping to merge in the bnxt driver this
> afternoon. If all goes well, I would appreciate it if you could base your patchset
> off the rel_16_07 tree with that set applied - save me having to resolve conflicts
> in files like the nic overview doc, which is always a pain to try and edit. :-)

OK. I will re-base the changes once you have done with bnxt merge.
Let me know once its done.

> 
> Regards,
> /Bruce
  
Bruce Richardson June 16, 2016, 2:33 p.m. UTC | #10
On Thu, Jun 16, 2016 at 04:47:39PM +0530, Jerin Jacob wrote:
> On Thu, Jun 16, 2016 at 11:58:27AM +0100, Bruce Richardson wrote:
> > On Thu, Jun 16, 2016 at 03:01:02PM +0530, Jerin Jacob wrote:
> > > On Wed, Jun 15, 2016 at 03:39:25PM +0100, Bruce Richardson wrote:
> > > > On Wed, Jun 15, 2016 at 12:36:15AM +0530, Jerin Jacob wrote:
> > > > > This patch set provides the initial version of DPDK PMD for the
> > > > > built-in NIC device in Cavium ThunderX SoC family.
> > > > > 
> > > > > Implemented features and ThunderX nicvf PMD documentation added
> > > > > in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
> > > > > respectively in this patch set.
> > > > > 
> > > > > These patches are checked using checkpatch.sh with following
> > > > > additional ignore option:
> > > > >     options="$options --ignore=CAMELCASE,BRACKET_SPACE"
> > > > > CAMELCASE - To accommodate PRIx64
> > > > > BRACKET_SPACE - To accommodate AT&T inline assembly in two places
> > > > > 
> > > > > This patch set is based on DPDK 16.07-RC1
> > > > > and tested with git HEAD change-set
> > > > > ca173a909538a2f1082cd0dcb4d778a97dab69c3 along with
> > > > > following depended patch
> > > > > 
> > > > > http://dpdk.org/dev/patchwork/patch/11826/
> > > > > ethdev: add tunnel and port RSS offload types
> > > > > 
> > > > Hi Jerin,
> > > > 
> > > > hopefully a final set of comments before merge on this set, as it's looking
> > > > very good now.
> > > > 
> > > > * Two patches look like they need to be split, as they are combining multiple
> > > >   functions into one patch. They are:
> > > >     [dpdk-dev,v5,16/25] net/thunderx: add MTU set and promiscuous enable support
> > > >     [dpdk-dev,v5,20/25] net/thunderx: implement supported ptype get and Rx queue count
> > > >   For the other patches which add multiple functions, the functions seem to be
> > > >   logically related so I don't think there is a problem
> > > > 
> > > > * check-git-logs.sh is warning about a few of the commit messages being too long.
> > > >   Splitting patch 20 should fix one of those, but there are a few remaining.
> > > >   A number of titles refer to ThunderX in the message, but this is probably
> > > >   unnecessary, as the prefix already contains "net/thunderx" in it.
> > > 
> > > OK. I will send the next revision.
> > > 
> > 
> > Please hold off a few hours, as I'm hoping to merge in the bnxt driver this
> > afternoon. If all goes well, I would appreciate it if you could base your patchset
> > off the rel_16_07 tree with that set applied - save me having to resolve conflicts
> > in files like the nic overview doc, which is always a pain to try and edit. :-)
> 
> OK. I will re-base the changes once you have done with bnxt merge.
> Let me know once its done.
> 
Done now. Feel free to submit a new version based on rel_16_07 branch.

Thanks,
/Bruce
  
Jerin Jacob June 17, 2016, 1:29 p.m. UTC | #11
This patch set provides the initial version of DPDK PMD for the
built-in NIC device in Cavium ThunderX SoC family.

Implemented features and ThunderX nicvf PMD documentation added
in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
respectively in this patch set.

These patches are checked using checkpatch.sh with following
additional ignore option:
    options="$options --ignore=CAMELCASE,BRACKET_SPACE"
CAMELCASE - To accommodate PRIx64
BRACKET_SPACE - To accommodate AT&T inline assembly in two places

This patch set is based on DPDK 16.07-RC1
and tested with git HEAD change-set
ad00c7ec23e3b7723217bc29e03eb40409aaf617(in dpdk-next-net/rel_16_07)
along with following depended patch

http://dpdk.org/dev/patchwork/patch/11826/
ethdev: add tunnel and port RSS offload types

V1->V2

http://dpdk.org/dev/patchwork/patch/12609/
-- added const for the const struct tables
-- remove multiple blank lines
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12610/
-- removed DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_net lib/librte_malloc
-- add const for table structs
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12614/
-- s/DEFAULT_*/NICVF_DEFAULT_*/gc
http://dpdk.org/dev/patchwork/patch/12615/
-- Fix typos
-- addressed style comments
http://dpdk.org/dev/patchwork/patch/12616/
-- removed redundant txq->tail = 0 and txq->head = 0
http://dpdk.org/dev/patchwork/patch/12627/
-- fixed the documentation changes

-- fixed TAB+space occurrences in functions
-- rebased to c8c33ad7f94c59d1c0676af0cfd61207b3e808db

V2->V3

http://dpdk.org/dev/patchwork/patch/13060/
-- Changed polling infrastructure to use rte_eal_alarm* instead of timerfd_create API
-- rebased to ca173a909538a2f1082cd0dcb4d778a97dab69c3

V3->V4

addressed comments from Ferruh's review

http://dpdk.org/dev/patchwork/patch/13314/
-- s/avilable/available
http://dpdk.org/dev/patchwork/patch/13323/
-- s/witout/without

http://dpdk.org/dev/patchwork/patch/13318/
-- s/nicvf_free_xmittted_buffers/nicvf_free_xmitted_buffers
-- fix checkpatch errors
http://dpdk.org/dev/patchwork/patch/13307/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13308/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13320/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13321/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13322/
-- addressed review comments
http://dpdk.org/dev/patchwork/patch/13324/
-- addressed review comments and created a separate patch for
the platform-specific config change

-- update change log to net/thunderx: ........

V4->V5
-- split up the drivers/net/thunderx/nicvf/base files into the following
patches, as suggested by Bruce

net/thunderx/base: add HW constants for ThunderX inbuilt NIC
net/thunderx/base: add register definition for ThunderX inbuilt NIC
net/thunderx/base: implement DPDK based platform abstraction for base code
net/thunderx/base: add mbox API for ThunderX PF/VF driver communication
net/thunderx/base: add hardware API for ThunderX nicvf inbuilt NIC
net/thunderx/base: add RSS and reta configuration HW APIs
net/thunderx/base: add statistics get HW APIs

-- Corrected wrong git commit log messages flagged by check-git-log.sh

V5->V6
-- Rebased to dpdk-next-net/rel_16_07(ad00c7ec23e3b7723217bc29e03eb40409aaf617)
-- Split each of the following v5 patches into two logical patches
[dpdk-dev,v5,16/25] net/thunderx: add MTU set and promiscuous enable support
[dpdk-dev,v5,20/25] net/thunderx: implement supported ptype get and Rx queue count
-- Fixed check-git-logs.sh warnings about commit messages being too long

Jerin Jacob (27):
  net/thunderx/base: add HW constants
  net/thunderx/base: add HW register definitions
  net/thunderx/base: implement DPDK based platform abstraction
  net/thunderx/base: add mbox APIs for PF/VF communication
  net/thunderx/base: add hardware API
  net/thunderx/base: add RSS and reta configuration HW APIs
  net/thunderx/base: add statistics get HW APIs
  net/thunderx: add pmd skeleton
  net/thunderx: add link status and link update support
  net/thunderx: add registers dump support
  net/thunderx: add ethdev configure support
  net/thunderx: add get device info support
  net/thunderx: add Rx queue setup and release support
  net/thunderx: add Tx queue setup and release support
  net/thunderx: add RSS and reta query and update support
  net/thunderx: add MTU set support
  net/thunderx: add promiscuous enable support
  net/thunderx: add stats support
  net/thunderx: add single and multi segment Tx functions
  net/thunderx: add single and multi segment Rx functions
  net/thunderx: add supported packet type get
  net/thunderx: add Rx queue count support
  net/thunderx: add Rx queue start and stop support
  net/thunderx: add Tx queue start and stop support
  net/thunderx: add device start,stop and close support
  net/thunderx: updated driver documentation and release notes
  maintainers: claim responsibility for the ThunderX nicvf PMD

 MAINTAINERS                                        |    6 +
 config/common_base                                 |   10 +
 config/defconfig_arm64-thunderx-linuxapp-gcc       |   10 +
 doc/guides/nics/index.rst                          |    1 +
 doc/guides/nics/overview.rst                       |   96 +-
 doc/guides/nics/thunderx.rst                       |  354 ++++
 doc/guides/rel_notes/release_16_07.rst             |    1 +
 drivers/net/Makefile                               |    1 +
 drivers/net/thunderx/Makefile                      |   65 +
 drivers/net/thunderx/base/nicvf_hw.c               |  905 ++++++++++
 drivers/net/thunderx/base/nicvf_hw.h               |  240 +++
 drivers/net/thunderx/base/nicvf_hw_defs.h          | 1219 +++++++++++++
 drivers/net/thunderx/base/nicvf_mbox.c             |  418 +++++
 drivers/net/thunderx/base/nicvf_mbox.h             |  232 +++
 drivers/net/thunderx/base/nicvf_plat.h             |  132 ++
 drivers/net/thunderx/nicvf_ethdev.c                | 1791 ++++++++++++++++++++
 drivers/net/thunderx/nicvf_ethdev.h                |  106 ++
 drivers/net/thunderx/nicvf_logs.h                  |   83 +
 drivers/net/thunderx/nicvf_rxtx.c                  |  599 +++++++
 drivers/net/thunderx/nicvf_rxtx.h                  |  101 ++
 drivers/net/thunderx/nicvf_struct.h                |  124 ++
 .../thunderx/rte_pmd_thunderx_nicvf_version.map    |    4 +
 mk/rte.app.mk                                      |    1 +
 23 files changed, 6451 insertions(+), 48 deletions(-)
 create mode 100644 doc/guides/nics/thunderx.rst
 create mode 100644 drivers/net/thunderx/Makefile
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.c
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.h
 create mode 100644 drivers/net/thunderx/base/nicvf_hw_defs.h
 create mode 100644 drivers/net/thunderx/base/nicvf_mbox.c
 create mode 100644 drivers/net/thunderx/base/nicvf_mbox.h
 create mode 100644 drivers/net/thunderx/base/nicvf_plat.h
 create mode 100644 drivers/net/thunderx/nicvf_ethdev.c
 create mode 100644 drivers/net/thunderx/nicvf_ethdev.h
 create mode 100644 drivers/net/thunderx/nicvf_logs.h
 create mode 100644 drivers/net/thunderx/nicvf_rxtx.c
 create mode 100644 drivers/net/thunderx/nicvf_rxtx.h
 create mode 100644 drivers/net/thunderx/nicvf_struct.h
 create mode 100644 drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map
  
Bruce Richardson June 20, 2016, 11:28 a.m. UTC | #12
On Fri, Jun 17, 2016 at 06:59:27PM +0530, Jerin Jacob wrote:
> This patch set provides the initial version of DPDK PMD for the
> built-in NIC device in Cavium ThunderX SoC family.
> 
> Implemented features and ThunderX nicvf PMD documentation added
> in doc/guides/nics/overview.rst and doc/guides/nics/thunderx.rst
> respectively in this patch set.
> 
Patchset applied to dpdk-next-net/rel_16_07

Thanks,
/Bruce
  

Patch

diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
new file mode 100644
index 0000000..24fe77d
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -0,0 +1,908 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <math.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "nicvf_plat.h"
+
+struct nicvf_reg_info {
+	uint32_t offset;
+	const char *name;
+};
+
+#define NICVF_REG_INFO(reg) {reg, #reg}
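+/*
+ * Pair a register offset with its stringized name, e.g.
+ * NICVF_REG_INFO(NIC_VF_CFG) expands to {NIC_VF_CFG, "NIC_VF_CFG"},
+ * so the dump tables below cannot drift out of sync with the names.
+ */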
+
+static const struct nicvf_reg_info nicvf_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_VF_CFG),
+	NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
+	NICVF_REG_INFO(NIC_VF_INT),
+	NICVF_REG_INFO(NIC_VF_INT_W1S),
+	NICVF_REG_INFO(NIC_VF_ENA_W1C),
+	NICVF_REG_INFO(NIC_VF_ENA_W1S),
+	NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
+	NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
+};
+
+static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
+	{NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
+	{NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
+	{NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
+	{NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
+	{NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
+	{NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
+	{NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
+	{NIC_VNIC_TX_STAT_0_4 + 16,  "NIC_VNIC_STAT_TX_BCAST"},
+	{NIC_VNIC_TX_STAT_0_4 + 24,  "NIC_VNIC_STAT_TX_MCAST"},
+	{NIC_VNIC_TX_STAT_0_4 + 32,  "NIC_VNIC_STAT_TX_DROP"},
+	{NIC_VNIC_RX_STAT_0_13 + 0,  "NIC_VNIC_STAT_RX_OCTS"},
+	{NIC_VNIC_RX_STAT_0_13 + 8,  "NIC_VNIC_STAT_RX_UCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
+	{NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
+	{NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
+	{NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
+	{NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
+	{NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
+	{NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
+};
+
+static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
+};
+
+static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
+	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
+	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
+};
+
+static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
+};
+
+static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
+};
+
+int
+nicvf_base_init(struct nicvf *nic)
+{
+	nic->hwcap = 0;
+	if (nic->subsystem_device_id == 0)
+		return NICVF_ERR_BASE_INIT;
+
+	if (nicvf_hw_version(nic) == NICVF_PASS2)
+		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;
+
+	return NICVF_OK;
+}
+
+/* dump on stdout if data is NULL */
+int
+nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
+{
+	uint32_t i, q;
+	bool dump_stdout;
+
+	dump_stdout = data ? 0 : 1;
+
+	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
+		if (dump_stdout)
+			nicvf_log("%24s  = 0x%" PRIx64 "\n",
+				nicvf_reg_tbl[i].name,
+				nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
+		else
+			*data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
+
+	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
+		if (dump_stdout)
+			nicvf_log("%24s  = 0x%" PRIx64 "\n",
+				nicvf_multi_reg_tbl[i].name,
+				nicvf_reg_read(nic,
+					nicvf_multi_reg_tbl[i].offset));
+		else
+			*data++ = nicvf_reg_read(nic,
+					nicvf_multi_reg_tbl[i].offset);
+
+	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
+		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
+			if (dump_stdout)
+				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
+					nicvf_qset_cq_reg_tbl[i].name, q,
+					nicvf_queue_reg_read(nic,
+					nicvf_qset_cq_reg_tbl[i].offset, q));
+			else
+				*data++ = nicvf_queue_reg_read(nic,
+					nicvf_qset_cq_reg_tbl[i].offset, q);
+
+	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
+		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
+			if (dump_stdout)
+				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
+					nicvf_qset_rq_reg_tbl[i].name, q,
+					nicvf_queue_reg_read(nic,
+					nicvf_qset_rq_reg_tbl[i].offset, q));
+			else
+				*data++ = nicvf_queue_reg_read(nic,
+					nicvf_qset_rq_reg_tbl[i].offset, q);
+
+	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
+		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
+			if (dump_stdout)
+				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
+					nicvf_qset_sq_reg_tbl[i].name, q,
+					nicvf_queue_reg_read(nic,
+					nicvf_qset_sq_reg_tbl[i].offset, q));
+			else
+				*data++ = nicvf_queue_reg_read(nic,
+					nicvf_qset_sq_reg_tbl[i].offset, q);
+
+	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
+		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
+			if (dump_stdout)
+				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
+					nicvf_qset_rbdr_reg_tbl[i].name, q,
+					nicvf_queue_reg_read(nic,
+					nicvf_qset_rbdr_reg_tbl[i].offset, q));
+			else
+				*data++ = nicvf_queue_reg_read(nic,
+					nicvf_qset_rbdr_reg_tbl[i].offset, q);
+	return 0;
+}
+
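+/*
+ * Number of 64-bit words that nicvf_reg_dump() emits; callers that
+ * pass a non-NULL buffer should size it to nicvf_reg_get_count()
+ * entries of uint64_t.
+ */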
+int
+nicvf_reg_get_count(void)
+{
+	int nr_regs;
+
+	nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
+	nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
+	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
+			MAX_CMP_QUEUES_PER_QS;
+	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
+			MAX_RCV_QUEUES_PER_QS;
+	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
+			MAX_SND_QUEUES_PER_QS;
+	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
+			MAX_RCV_BUF_DESC_RINGS_PER_QS;
+
+	return nr_regs;
+}
+
+static int
+nicvf_qset_config_internal(struct nicvf *nic, bool enable)
+{
+	int ret;
+	struct pf_qs_cfg pf_qs_cfg = {.value = 0};
+
+	pf_qs_cfg.ena = enable ? 1 : 0;
+	pf_qs_cfg.vnic = nic->vf_id;
+	ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
+	return ret ? NICVF_ERR_SET_QS : 0;
+}
+
+/* Requests PF to assign and enable Qset */
+int
+nicvf_qset_config(struct nicvf *nic)
+{
+	/* Enable Qset */
+	return nicvf_qset_config_internal(nic, true);
+}
+
+int
+nicvf_qset_reclaim(struct nicvf *nic)
+{
+	/* Disable Qset */
+	return nicvf_qset_config_internal(nic, false);
+}
+
+static int
+cmpfunc(const void *a, const void *b)
+{
+	return (*(const uint32_t *)a - *(const uint32_t *)b);
+}
+
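+/*
+ * Round 'val' up to the nearest entry in 'list'; the list is sorted
+ * in place first. Returns 0 when 'val' is larger than every entry,
+ * which callers treat as an unsupported size.
+ */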
+static uint32_t
+nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
+{
+	uint32_t i;
+
+	qsort(list, entries, sizeof(uint32_t), cmpfunc);
+	for (i = 0; i < entries; i++)
+		if (val <= list[i])
+			break;
+	/* Not in the list */
+	if (i >= entries)
+		return 0;
+	else
+		return list[i];
+}
+
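+/*
+ * Log which CQ/SQ/RBDR status bits raised the Qset error and abort:
+ * such errors are treated as fatal rather than recoverable.
+ */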
+static void
+nicvf_handle_qset_err_intr(struct nicvf *nic)
+{
+	uint16_t qidx;
+	uint64_t status;
+
+	nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
+	nicvf_reg_dump(nic, NULL);
+
+	for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
+		status = nicvf_queue_reg_read(
+				nic, NIC_QSET_CQ_0_7_STATUS, qidx);
+		if (!(status & NICVF_CQ_ERR_MASK))
+			continue;
+
+		if (status & NICVF_CQ_WR_FULL)
+			nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
+		if (status & NICVF_CQ_WR_DISABLE)
+			nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
+		if (status & NICVF_CQ_WR_FAULT)
+			nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
+		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
+	}
+
+	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+		status = nicvf_queue_reg_read(
+				nic, NIC_QSET_SQ_0_7_STATUS, qidx);
+		if (!(status & NICVF_SQ_ERR_MASK))
+			continue;
+
+		if (status & NICVF_SQ_ERR_STOPPED)
+			nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
+		if (status & NICVF_SQ_ERR_SEND)
+			nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
+		if (status & NICVF_SQ_ERR_DPE)
+			nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
+		nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
+	}
+
+	for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
+		status = nicvf_queue_reg_read(nic,
+					NIC_QSET_RBDR_0_1_STATUS0, qidx);
+		status &= NICVF_RBDR_FIFO_STATE_MASK;
+		status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
+
+		if (status == RBDR_FIFO_STATE_FAIL)
+			nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
+		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
+	}
+
+	nicvf_disable_all_interrupts(nic);
+	abort();
+}
+
+/*
+ * Handle the "mbox" and "queue-set error" interrupts that the poll
+ * mode driver is interested in.
+ * This function is not re-entrant.
+ * The caller should provide proper serialization.
+ */
+int
+nicvf_reg_poll_interrupts(struct nicvf *nic)
+{
+	int msg = 0;
+	uint64_t intr;
+
+	intr = nicvf_reg_read(nic, NIC_VF_INT);
+	if (intr & NICVF_INTR_MBOX_MASK) {
+		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
+		msg = nicvf_handle_mbx_intr(nic);
+	}
+	if (intr & NICVF_INTR_QS_ERR_MASK) {
+		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
+		nicvf_handle_qset_err_intr(nic);
+	}
+	return msg;
+}
+
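+/*
+ * Poll a 'bits'-wide field starting at 'bit_pos' of a queue register
+ * until it reads back 'val'; retries 10 times with a 2ms delay
+ * (~20ms budget) before returning NICVF_ERR_REG_POLL.
+ */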
+static int
+nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
+		    uint32_t bit_pos, uint32_t bits, uint64_t val)
+{
+	uint64_t bit_mask;
+	uint64_t reg_val;
+	int timeout = 10;
+
+	bit_mask = (1ULL << bits) - 1;
+	bit_mask = (bit_mask << bit_pos);
+
+	while (timeout) {
+		reg_val = nicvf_queue_reg_read(nic, offset, qidx);
+		if (((reg_val & bit_mask) >> bit_pos) == val)
+			return NICVF_OK;
+		nicvf_delay_us(2000);
+		timeout--;
+	}
+	return NICVF_ERR_REG_POLL;
+}
+
+int
+nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+	uint64_t status;
+	int timeout = 10;
+	struct nicvf_rbdr *rbdr = nic->rbdr;
+
+	/* Save head and tail pointers for freeing up buffers */
+	if (rbdr) {
+		rbdr->head = nicvf_queue_reg_read(nic,
+					NIC_QSET_RBDR_0_1_HEAD,
+					qidx) >> 3;
+		rbdr->tail = nicvf_queue_reg_read(nic,
+					NIC_QSET_RBDR_0_1_TAIL,
+					qidx) >> 3;
+		rbdr->next_tail = rbdr->tail;
+	}
+
+	/* Reset RBDR */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
+				NICVF_RBDR_RESET);
+
+	/* Disable RBDR */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
+				62, 2, 0x00))
+		return NICVF_ERR_RBDR_DISABLE;
+
+	while (1) {
+		status = nicvf_queue_reg_read(nic,
+				NIC_QSET_RBDR_0_1_PRFCH_STATUS,	qidx);
+		if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
+			break;
+		nicvf_delay_us(2000);
+		timeout--;
+		if (!timeout)
+			return NICVF_ERR_RBDR_PREFETCH;
+	}
+
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
+			NICVF_RBDR_RESET);
+	if (nicvf_qset_poll_reg(nic, qidx,
+				NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+		return NICVF_ERR_RBDR_RESET1;
+
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+	if (nicvf_qset_poll_reg(nic, qidx,
+				NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+		return NICVF_ERR_RBDR_RESET2;
+
+	return NICVF_OK;
+}
+
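+/*
+ * Encode a queue length for the hardware: the size field holds
+ * log2(len) minus the queue type's minimum-size shift, and must land
+ * in the 0..6 range the CFG registers accept (hence the asserts).
+ */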
+static int
+nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
+{
+	int val;
+
+	val = ((uint32_t)log2(len) - len_shift);
+	assert(val >= 0);
+	assert(val <= 6);
+	return val;
+}
+
+int
+nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
+{
+	int ret;
+	uint64_t head, tail;
+	struct nicvf_rbdr *rbdr = nic->rbdr;
+	struct rbdr_cfg rbdr_cfg = {.value = 0};
+
+	ret = nicvf_qset_rbdr_reclaim(nic, qidx);
+	if (ret)
+		return ret;
+
+	/* Set descriptor base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
+
+	/* Enable RBDR  & set queue size */
+	rbdr_cfg.reserved_45_63 = 0;
+	rbdr_cfg.ena = 1;
+	rbdr_cfg.reset = 0;
+	rbdr_cfg.ldwb = 0;
+	rbdr_cfg.reserved_36_41 = 0;
+	rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
+					RBDR_SIZE_SHIFT);
+	rbdr_cfg.reserved_25_31 = 0;
+	rbdr_cfg.avg_con = 0;
+	rbdr_cfg.reserved_12_15 = 0;
+	rbdr_cfg.lines = rbdr->buffsz / 128;
+
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
+
+	/* Verify proper RBDR reset */
+	head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
+
+	if (head | tail)
+		return NICVF_ERR_RBDR_RESET;
+
+	return NICVF_OK;
+}
+
+uint32_t
+nicvf_qsize_rbdr_roundup(uint32_t val)
+{
+	uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
+				RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
+				RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
+				RBDR_QUEUE_SZ_512K};
+	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
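+/*
+ * Fill the ring with up to 'max_buffs' receive buffers: 'handler' is
+ * invoked once per descriptor with 'opaque' (typically the buffer
+ * pool) and must return the physical address of a fresh buffer, or
+ * 0 to stop early; the doorbell then publishes the filled count.
+ */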
+int
+nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
+			  rbdr_pool_get_handler handler,
+			  void *opaque, uint32_t max_buffs)
+{
+	struct rbdr_entry_t *desc, *desc0;
+	struct nicvf_rbdr *rbdr = nic->rbdr;
+	uint32_t count;
+	nicvf_phys_addr_t phy;
+
+	assert(rbdr != NULL);
+	desc = rbdr->desc;
+	count = 0;
+	/* Don't fill beyond the max number of descriptors */
+	while (count < (rbdr->qlen_mask)) {
+		if (count >= max_buffs)
+			break;
+		desc0 = desc + count;
+		phy = handler(opaque);
+		if (phy) {
+			desc0->full_addr = phy;
+			count++;
+		} else {
+			break;
+		}
+	}
+	nicvf_smp_wmb();
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
+	rbdr->tail = nicvf_queue_reg_read(nic,
+				NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
+	rbdr->next_tail = rbdr->tail;
+	nicvf_smp_rmb();
+	return 0;
+}
+
+int
+nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
+{
+	return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+}
+
+int
+nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+	uint64_t head, tail;
+	struct sq_cfg sq_cfg;
+
+	sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+
+	/* Disable send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+
+	/* Check if SQ is stopped */
+	if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
+				NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
+		return NICVF_ERR_SQ_DISABLE;
+
+	/* Reset send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+	if (head | tail)
+		return  NICVF_ERR_SQ_RESET;
+
+	return 0;
+}
+
+int
+nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
+{
+	int ret;
+	struct sq_cfg sq_cfg = {.value = 0};
+
+	ret = nicvf_qset_sq_reclaim(nic, qidx);
+	if (ret)
+		return ret;
+
+	/* Send a mailbox msg to PF to config SQ */
+	if (nicvf_mbox_sq_config(nic, qidx))
+		return  NICVF_ERR_SQ_PF_CFG;
+
+	/* Set queue base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
+
+	/* Enable send queue  & set queue size */
+	sq_cfg.ena = 1;
+	sq_cfg.reset = 0;
+	sq_cfg.ldwb = 0;
+	sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
+	sq_cfg.tstmp_bgx_intf = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
+
+	/* Ring doorbell so that H/W restarts processing SQEs */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+
+	return 0;
+}
+
+uint32_t
+nicvf_qsize_sq_roundup(uint32_t val)
+{
+	uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
+				SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
+				SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
+				SND_QUEUE_SZ_64K};
+	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
+int
+nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+	/* Disable receive queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+	return nicvf_mbox_rq_sync(nic);
+}
+
+int
+nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
+{
+	struct pf_rq_cfg pf_rq_cfg = {.value = 0};
+	struct rq_cfg rq_cfg = {.value = 0};
+
+	if (nicvf_qset_rq_reclaim(nic, qidx))
+		return NICVF_ERR_RQ_CLAIM;
+
+	pf_rq_cfg.strip_pre_l2 = 0;
+	/* First cache line of RBDR data will be allocated into L2C */
+	pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
+	pf_rq_cfg.cq_qs = nic->vf_id;
+	pf_rq_cfg.cq_idx = qidx;
+	pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
+	pf_rq_cfg.rbdr_cont_idx = 0;
+	pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
+	pf_rq_cfg.rbdr_strt_idx = 0;
+
+	/* Send a mailbox msg to PF to config RQ */
+	if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
+		return NICVF_ERR_RQ_PF_CFG;
+
+	/* Select Rx backpressure */
+	if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
+		return NICVF_ERR_RQ_BP_CFG;
+
+	/* Send a mailbox msg to PF to config RQ drop */
+	if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
+		return NICVF_ERR_RQ_DROP_CFG;
+
+	/* Enable Receive queue */
+	rq_cfg.ena = 1;
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
+
+	return 0;
+}
+
+int
+nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+	uint64_t tail, head;
+
+	/* Disable completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
+		return NICVF_ERR_CQ_DISABLE;
+
+	/* Reset completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
+	head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
+	if (head | tail)
+		return  NICVF_ERR_CQ_RESET;
+
+	/* Disable timer threshold (doesn't get reset upon CQ reset) */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+	return 0;
+}
+
+int
+nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
+{
+	int ret;
+	struct cq_cfg cq_cfg = {.value = 0};
+
+	ret = nicvf_qset_cq_reclaim(nic, qidx);
+	if (ret)
+		return ret;
+
+	/* Set completion queue base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
+
+	cq_cfg.ena = 1;
+	cq_cfg.reset = 0;
+	/* Writes of CQE will be allocated into L2C */
+	cq_cfg.caching = 1;
+	cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
+	cq_cfg.avg_con = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
+
+	/* Set threshold value for interrupt generation */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+	return 0;
+}
+
+uint32_t
+nicvf_qsize_cq_roundup(uint32_t val)
+{
+	uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
+				CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
+				CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
+				CMP_QUEUE_SZ_64K};
+	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
+void
+nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
+{
+	uint64_t val;
+
+	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+	if (enable)
+		val |= (STRIP_FIRST_VLAN << 25);
+	else
+		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
+
+	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
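+/* The 40-byte RSS key is programmed as five 64-bit big-endian words. */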
+void
+nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
+{
+	int idx;
+	uint64_t addr, val;
+	uint64_t *keyptr = (uint64_t *)key;
+
+	addr = NIC_VNIC_RSS_KEY_0_4;
+	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+		val = nicvf_cpu_to_be_64(*keyptr);
+		nicvf_reg_write(nic, addr, val);
+		addr += sizeof(uint64_t);
+		keyptr++;
+	}
+}
+
+void
+nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
+{
+	int idx;
+	uint64_t addr, val;
+	uint64_t *keyptr = (uint64_t *)key;
+
+	addr = NIC_VNIC_RSS_KEY_0_4;
+	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+		val = nicvf_reg_read(nic, addr);
+		*keyptr = nicvf_be_to_cpu_64(val);
+		addr += sizeof(uint64_t);
+		keyptr++;
+	}
+}
+
+void
+nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
+{
+	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
+}
+
+uint64_t
+nicvf_rss_get_cfg(struct nicvf *nic)
+{
+	return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+}
+
+int
+nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
+{
+	uint32_t idx;
+	struct nicvf_rss_reta_info *rss = &nic->rss_info;
+
+	/* result will be stored in nic->rss_info.rss_size */
+	if (nicvf_mbox_get_rss_size(nic))
+		return NICVF_ERR_RSS_GET_SZ;
+
+	assert(rss->rss_size > 0);
+	rss->hash_bits = (uint8_t)log2(rss->rss_size);
+	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
+		rss->ind_tbl[idx] = tbl[idx];
+
+	if (nicvf_mbox_config_rss(nic))
+		return NICVF_ERR_RSS_TBL_UPDATE;
+
+	return NICVF_OK;
+}
+
+int
+nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
+{
+	uint32_t idx;
+	struct nicvf_rss_reta_info *rss = &nic->rss_info;
+
+	/* result will be stored in nic->rss_info.rss_size */
+	if (nicvf_mbox_get_rss_size(nic))
+		return NICVF_ERR_RSS_GET_SZ;
+
+	assert(rss->rss_size > 0);
+	rss->hash_bits = (uint8_t)log2(rss->rss_size);
+	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
+		tbl[idx] = rss->ind_tbl[idx];
+
+	return NICVF_OK;
+}
+
+int
+nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
+{
+	uint32_t idx;
+	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+	uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
+		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
+	};
+
+	if (nic->cpi_alg != CPI_ALG_NONE)
+		return -EINVAL;
+
+	if (cfg == 0)
+		return -EINVAL;
+
+	/* Update default RSS key and cfg */
+	nicvf_rss_set_key(nic, default_key);
+	nicvf_rss_set_cfg(nic, cfg);
+
+	/* Update default RSS RETA */
+	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+		default_reta[idx] = idx % qcnt;
+
+	return nicvf_rss_reta_update(nic, default_reta,
+				NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+int
+nicvf_rss_term(struct nicvf *nic)
+{
+	uint32_t idx;
+	uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+	nicvf_rss_set_cfg(nic, 0);
+	/* Redirect all output to the 0th queue */
+	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+		disable_rss[idx] = 0;
+
+	return nicvf_rss_reta_update(nic, disable_rss,
+				NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+int
+nicvf_loopback_config(struct nicvf *nic, bool enable)
+{
+	if (enable && nic->loopback_supported == 0)
+		return NICVF_ERR_LOOPBACK_CFG;
+
+	return nicvf_mbox_loopback_config(nic, enable);
+}
+
+void
+nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
+{
+	stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
+	stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
+	stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
+	stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
+	stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
+	stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
+	stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
+	stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
+	stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
+	stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
+	stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
+	stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
+	stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
+	stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
+
+	stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
+	stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
+	stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
+	stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
+	stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
+}
+
+void
+nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
+		       uint16_t qidx)
+{
+	qstats->q_rx_bytes =
+		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
+	qstats->q_rx_packets =
+		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
+}
+
+void
+nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
+		       uint16_t qidx)
+{
+	qstats->q_tx_bytes =
+		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
+	qstats->q_tx_packets =
+		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
+}
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
new file mode 100644
index 0000000..32357cc
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -0,0 +1,240 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _THUNDERX_NICVF_HW_H
+#define _THUNDERX_NICVF_HW_H
+
+#include <stdint.h>
+
+#include "nicvf_hw_defs.h"
+
+#define	PCI_VENDOR_ID_CAVIUM			0x177D
+#define	PCI_DEVICE_ID_THUNDERX_PASS1_NICVF	0x0011
+#define	PCI_DEVICE_ID_THUNDERX_PASS2_NICVF	0xA034
+#define	PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF	0xA11E
+#define	PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF	0xA134
+
+#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define NICVF_GET_RX_STATS(reg) \
+	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define NICVF_GET_TX_STATS(reg) \
+	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
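+/* Statistics counters sit at 8-byte strides: an index maps to its
+ * register as the block base OR'd with (index << 3).
+ */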
+
+#define NICVF_PASS1	(PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF)
+#define NICVF_PASS2	(PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF)
+
+#define NICVF_CAP_TUNNEL_PARSING          (1ULL << 0)
+
+enum nicvf_tns_mode {
+	NIC_TNS_BYPASS_MODE = 0,
+	NIC_TNS_MODE,
+};
+
+enum nicvf_err_e {
+	NICVF_OK = 0,
+	NICVF_ERR_SET_QS = -8191, /* -8191 */
+	NICVF_ERR_RESET_QS,      /* -8190 */
+	NICVF_ERR_REG_POLL,      /* -8189 */
+	NICVF_ERR_RBDR_RESET,    /* -8188 */
+	NICVF_ERR_RBDR_DISABLE,  /* -8187 */
+	NICVF_ERR_RBDR_PREFETCH, /* -8186 */
+	NICVF_ERR_RBDR_RESET1,   /* -8185 */
+	NICVF_ERR_RBDR_RESET2,   /* -8184 */
+	NICVF_ERR_RQ_CLAIM,      /* -8183 */
+	NICVF_ERR_RQ_PF_CFG,	 /* -8182 */
+	NICVF_ERR_RQ_BP_CFG,	 /* -8181 */
+	NICVF_ERR_RQ_DROP_CFG,	 /* -8180 */
+	NICVF_ERR_CQ_DISABLE,	 /* -8179 */
+	NICVF_ERR_CQ_RESET,	 /* -8178 */
+	NICVF_ERR_SQ_DISABLE,	 /* -8177 */
+	NICVF_ERR_SQ_RESET,	 /* -8176 */
+	NICVF_ERR_SQ_PF_CFG,	 /* -8175 */
+	NICVF_ERR_RSS_TBL_UPDATE,/* -8174 */
+	NICVF_ERR_RSS_GET_SZ,    /* -8173 */
+	NICVF_ERR_BASE_INIT,     /* -8172 */
+	NICVF_ERR_LOOPBACK_CFG,  /* -8171 */
+};
+
+typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *opaque);
+
+struct nicvf_hw_rx_qstats {
+	uint64_t q_rx_bytes;
+	uint64_t q_rx_packets;
+};
+
+struct nicvf_hw_tx_qstats {
+	uint64_t q_tx_bytes;
+	uint64_t q_tx_packets;
+};
+
+struct nicvf_hw_stats {
+	uint64_t rx_bytes;
+	uint64_t rx_ucast_frames;
+	uint64_t rx_bcast_frames;
+	uint64_t rx_mcast_frames;
+	uint64_t rx_fcs_errors;
+	uint64_t rx_l2_errors;
+	uint64_t rx_drop_red;
+	uint64_t rx_drop_red_bytes;
+	uint64_t rx_drop_overrun;
+	uint64_t rx_drop_overrun_bytes;
+	uint64_t rx_drop_bcast;
+	uint64_t rx_drop_mcast;
+	uint64_t rx_drop_l3_bcast;
+	uint64_t rx_drop_l3_mcast;
+
+	uint64_t tx_bytes_ok;
+	uint64_t tx_ucast_frames_ok;
+	uint64_t tx_bcast_frames_ok;
+	uint64_t tx_mcast_frames_ok;
+	uint64_t tx_drops;
+};
+
+struct nicvf_rss_reta_info {
+	uint8_t hash_bits;
+	uint16_t rss_size;
+	uint8_t ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+};
+
+/* Common structs used in DPDK and base layer are defined in DPDK layer */
+#include "../nicvf_struct.h"
+
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_rbdr) <= 128);
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_txq) <= 128);
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_rxq) <= 128);
+
+static inline void
+nicvf_reg_write(struct nicvf *nic, uint32_t offset, uint64_t val)
+{
+	nicvf_addr_write(nic->reg_base + offset, val);
+}
+
+static inline uint64_t
+nicvf_reg_read(struct nicvf *nic, uint32_t offset)
+{
+	return nicvf_addr_read(nic->reg_base + offset);
+}
+
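+/*
+ * Queue registers are replicated per queue within the VF BAR; the
+ * queue index selects a copy via the bits above NIC_Q_NUM_SHIFT.
+ */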
+static inline uintptr_t
+nicvf_qset_base(struct nicvf *nic, uint32_t qidx)
+{
+	return nic->reg_base + (qidx << NIC_Q_NUM_SHIFT);
+}
+
+static inline void
+nicvf_queue_reg_write(struct nicvf *nic, uint32_t offset, uint32_t qidx,
+		      uint64_t val)
+{
+	nicvf_addr_write(nicvf_qset_base(nic, qidx) + offset, val);
+}
+
+static inline uint64_t
+nicvf_queue_reg_read(struct nicvf *nic, uint32_t offset, uint32_t qidx)
+{
+	return	nicvf_addr_read(nicvf_qset_base(nic, qidx) + offset);
+}
+
+static inline void
+nicvf_disable_all_interrupts(struct nicvf *nic)
+{
+	nicvf_reg_write(nic, NIC_VF_ENA_W1C, NICVF_INTR_ALL_MASK);
+	nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_ALL_MASK);
+}
+
+static inline uint32_t
+nicvf_hw_version(struct nicvf *nic)
+{
+	return nic->subsystem_device_id;
+}
+
+static inline uint64_t
+nicvf_hw_cap(struct nicvf *nic)
+{
+	return nic->hwcap;
+}
+
+int nicvf_base_init(struct nicvf *nic);
+
+int nicvf_reg_get_count(void);
+int nicvf_reg_poll_interrupts(struct nicvf *nic);
+int nicvf_reg_dump(struct nicvf *nic, uint64_t *data);
+
+int nicvf_qset_config(struct nicvf *nic);
+int nicvf_qset_reclaim(struct nicvf *nic);
+
+int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx);
+int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx);
+int nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
+			      rbdr_pool_get_handler handler, void *opaque,
+			      uint32_t max_buffs);
+int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx,
+			 struct nicvf_rxq *rxq);
+int nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx,
+			 struct nicvf_rxq *rxq);
+int nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx,
+			 struct nicvf_txq *txq);
+int nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx);
+
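+/* The roundup helpers below bump a requested ring size to the nearest
+ * supported queue size; e.g. nicvf_qsize_cq_roundup(2000) is expected
+ * to yield 2048, the smallest CMP_QUEUE_SZ_* value >= the request.
+ */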
+uint32_t nicvf_qsize_rbdr_roundup(uint32_t val);
+uint32_t nicvf_qsize_cq_roundup(uint32_t val);
+uint32_t nicvf_qsize_sq_roundup(uint32_t val);
+
+void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
+
+int nicvf_rss_config(struct nicvf *nic, uint32_t  qcnt, uint64_t cfg);
+int nicvf_rss_term(struct nicvf *nic);
+
+int nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count);
+int nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count);
+
+void nicvf_rss_set_key(struct nicvf *nic, uint8_t *key);
+void nicvf_rss_get_key(struct nicvf *nic, uint8_t *key);
+
+void nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val);
+uint64_t nicvf_rss_get_cfg(struct nicvf *nic);
+
+int nicvf_loopback_config(struct nicvf *nic, bool enable);
+
+void nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats);
+void nicvf_hw_get_rx_qstats(struct nicvf *nic,
+			    struct nicvf_hw_rx_qstats *qstats, uint16_t qidx);
+void nicvf_hw_get_tx_qstats(struct nicvf *nic,
+			    struct nicvf_hw_tx_qstats *qstats, uint16_t qidx);
+
+#endif /* _THUNDERX_NICVF_HW_H */
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
new file mode 100644
index 0000000..ef9354b
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -0,0 +1,1216 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _THUNDERX_NICVF_HW_DEFS_H
+#define _THUNDERX_NICVF_HW_DEFS_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* Virtual function register offsets */
+
+#define NIC_VF_CFG                      (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1           (0x000130)
+#define NIC_VF_INT                      (0x000200)
+#define NIC_VF_INT_W1S                  (0x000220)
+#define NIC_VF_ENA_W1C                  (0x000240)
+#define NIC_VF_ENA_W1S                  (0x000260)
+
+#define NIC_VNIC_RSS_CFG                (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4            (0x002200)
+#define NIC_VNIC_TX_STAT_0_4            (0x004000)
+#define NIC_VNIC_RX_STAT_0_13           (0x004100)
+#define NIC_VNIC_RQ_GEN_CFG             (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG             (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2            (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH          (0x010410)
+#define NIC_QSET_CQ_0_7_BASE            (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD            (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL            (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR            (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS          (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2         (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG           (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG             (0x010600)
+#define NIC_QSET_RQ_0_7_STATUS0         (0x010700)
+#define NIC_QSET_RQ_0_7_STATUS1         (0x010708)
+
+#define NIC_QSET_SQ_0_7_CFG             (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH          (0x010810)
+#define NIC_QSET_SQ_0_7_BASE            (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD            (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL            (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR            (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS          (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG           (0x010848)
+#define NIC_QSET_SQ_0_7_STATUS0         (0x010900)
+#define NIC_QSET_SQ_0_7_STATUS1         (0x010908)
+
+#define NIC_QSET_RBDR_0_1_CFG           (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH        (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE          (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD          (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL          (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR          (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0       (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1       (0x010C48)
+#define NIC_QSET_RBDR_0_1_PRFCH_STATUS  (0x010C50)
+
+/* vNIC HW Constants */
+
+#define NIC_Q_NUM_SHIFT                 18
+
+#define MAX_QUEUE_SET                   128
+#define MAX_RCV_QUEUES_PER_QS           8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS   2
+#define MAX_SND_QUEUES_PER_QS           8
+#define MAX_CMP_QUEUES_PER_QS           8
+
+#define NICVF_INTR_CQ_SHIFT             0
+#define NICVF_INTR_SQ_SHIFT             8
+#define NICVF_INTR_RBDR_SHIFT           16
+#define NICVF_INTR_PKT_DROP_SHIFT       20
+#define NICVF_INTR_TCP_TIMER_SHIFT      21
+#define NICVF_INTR_MBOX_SHIFT           22
+#define NICVF_INTR_QS_ERR_SHIFT         23
+
+#define NICVF_INTR_CQ_MASK              (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK              (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK            (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK        (1 << NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK       (1 << NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK            (1 << NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK          (1 << NICVF_INTR_QS_ERR_SHIFT)
+#define NICVF_INTR_ALL_MASK             (0x7FFFFF)
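+
+/* Example (illustrative): the interrupt bit of completion queue 'qidx'
+ * is (1ULL << (NICVF_INTR_CQ_SHIFT + qidx)); writing it to
+ * NIC_VF_ENA_W1S enables that interrupt, writing it to NIC_VF_ENA_W1C
+ * disables it.
+ */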
+
+#define NICVF_CQ_WR_FULL                (1ULL << 26)
+#define NICVF_CQ_WR_DISABLE             (1ULL << 25)
+#define NICVF_CQ_WR_FAULT               (1ULL << 24)
+#define NICVF_CQ_ERR_MASK               (NICVF_CQ_WR_FULL |\
+					 NICVF_CQ_WR_DISABLE |\
+					 NICVF_CQ_WR_FAULT)
+#define NICVF_CQ_CQE_COUNT_MASK         (0xFFFF)
+
+#define NICVF_SQ_ERR_STOPPED            (1ULL << 21)
+#define NICVF_SQ_ERR_SEND               (1ULL << 20)
+#define NICVF_SQ_ERR_DPE                (1ULL << 19)
+#define NICVF_SQ_ERR_MASK               (NICVF_SQ_ERR_STOPPED |\
+					 NICVF_SQ_ERR_SEND |\
+					 NICVF_SQ_ERR_DPE)
+#define NICVF_SQ_STATUS_STOPPED_BIT     (21)
+
+#define NICVF_RBDR_FIFO_STATE_SHIFT     (62)
+#define NICVF_RBDR_FIFO_STATE_MASK      (3ULL << NICVF_RBDR_FIFO_STATE_SHIFT)
+#define NICVF_RBDR_COUNT_MASK           (0x7FFFF)
+
+/* Queue reset */
+#define NICVF_CQ_RESET                  (1ULL << 41)
+#define NICVF_SQ_RESET                  (1ULL << 17)
+#define NICVF_RBDR_RESET                (1ULL << 43)
+
+/* RSS constants */
+#define NIC_MAX_RSS_HASH_BITS           (8)
+#define NIC_MAX_RSS_IDR_TBL_SIZE        (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE               (5) /* 320 bit key, in 64-bit words */
+#define RSS_HASH_KEY_BYTE_SIZE          (40) /* 320 bit key, in bytes */
+
+#define RSS_L2_EXTENDED_HASH_ENA        (1 << 0)
+#define RSS_IP_ENA                      (1 << 1)
+#define RSS_TCP_ENA                     (1 << 2)
+#define RSS_TCP_SYN_ENA                 (1 << 3)
+#define RSS_UDP_ENA                     (1 << 4)
+#define RSS_L4_EXTENDED_ENA             (1 << 5)
+#define RSS_L3_BI_DIRECTION_ENA         (1 << 7)
+#define RSS_L4_BI_DIRECTION_ENA         (1 << 8)
+#define RSS_TUN_VXLAN_ENA               (1 << 9)
+#define RSS_TUN_GENEVE_ENA              (1 << 10)
+#define RSS_TUN_NVGRE_ENA               (1 << 11)
+
+#define RBDR_QUEUE_SZ_8K                (8 * 1024)
+#define RBDR_QUEUE_SZ_16K               (16 * 1024)
+#define RBDR_QUEUE_SZ_32K               (32 * 1024)
+#define RBDR_QUEUE_SZ_64K               (64 * 1024)
+#define RBDR_QUEUE_SZ_128K              (128 * 1024)
+#define RBDR_QUEUE_SZ_256K              (256 * 1024)
+#define RBDR_QUEUE_SZ_512K              (512 * 1024)
+
+#define RBDR_SIZE_SHIFT                 (13) /* 8k */
+
+#define SND_QUEUE_SZ_1K                 (1 * 1024)
+#define SND_QUEUE_SZ_2K                 (2 * 1024)
+#define SND_QUEUE_SZ_4K                 (4 * 1024)
+#define SND_QUEUE_SZ_8K                 (8 * 1024)
+#define SND_QUEUE_SZ_16K                (16 * 1024)
+#define SND_QUEUE_SZ_32K                (32 * 1024)
+#define SND_QUEUE_SZ_64K                (64 * 1024)
+
+#define SND_QSIZE_SHIFT                 (10) /* 1k */
+
+#define CMP_QUEUE_SZ_1K                 (1 * 1024)
+#define CMP_QUEUE_SZ_2K                 (2 * 1024)
+#define CMP_QUEUE_SZ_4K                 (4 * 1024)
+#define CMP_QUEUE_SZ_8K                 (8 * 1024)
+#define CMP_QUEUE_SZ_16K                (16 * 1024)
+#define CMP_QUEUE_SZ_32K                (32 * 1024)
+#define CMP_QUEUE_SZ_64K                (64 * 1024)
+
+#define CMP_QSIZE_SHIFT                 (10) /* 1k */
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS			64
+#define NIC_HW_MAX_FRS			9200 /* 9216 max packet including FCS */
+#define NIC_HW_MAX_SEGS			12
+
+/* Descriptor alignments */
+#define NICVF_RBDR_BASE_ALIGN_BYTES	128 /* 7 bits */
+#define NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */
+
+/* vNIC HW Enumerations */
+
+enum nic_send_ld_type_e {
+	NIC_SEND_LD_TYPE_E_LDD = 0x0,
+	NIC_SEND_LD_TYPE_E_LDT = 0x1,
+	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+	ETYPE_ALG_NONE = 0x0,
+	ETYPE_ALG_SKIP = 0x1,
+	ETYPE_ALG_ENDPARSE = 0x2,
+	ETYPE_ALG_VLAN = 0x3,
+	ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+	L3TYPE_NONE = 0x0,
+	L3TYPE_GRH = 0x1,
+	L3TYPE_IPV4 = 0x4,
+	L3TYPE_IPV4_OPTIONS = 0x5,
+	L3TYPE_IPV6 = 0x6,
+	L3TYPE_IPV6_OPTIONS = 0x7,
+	L3TYPE_ET_STOP = 0xD,
+	L3TYPE_OTHER = 0xE,
+};
+
+#define NICVF_L3TYPE_OPTIONS_MASK	((uint8_t)1)
+#define NICVF_L3TYPE_IPVX_MASK		((uint8_t)0x06)
+
+enum layer4_type {
+	L4TYPE_NONE = 0x0,
+	L4TYPE_IPSEC_ESP = 0x1,
+	L4TYPE_IPFRAG = 0x2,
+	L4TYPE_IPCOMP = 0x3,
+	L4TYPE_TCP = 0x4,
+	L4TYPE_UDP = 0x5,
+	L4TYPE_SCTP = 0x6,
+	L4TYPE_GRE = 0x7,
+	L4TYPE_ROCE_BTH = 0x8,
+	L4TYPE_OTHER = 0xE,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+	CPI_ALG_NONE = 0x0,
+	CPI_ALG_VLAN = 0x1,
+	CPI_ALG_VLAN16 = 0x2,
+	CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+	RSS_ALG_NONE = 0x00,
+	RSS_ALG_PORT = 0x01,
+	RSS_ALG_IP = 0x02,
+	RSS_ALG_TCP_IP = 0x03,
+	RSS_ALG_UDP_IP = 0x04,
+	RSS_ALG_SCTP_IP = 0x05,
+	RSS_ALG_GRE_IP = 0x06,
+	RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+	RSS_HASH_L2ETC = 0x00,
+	RSS_HASH_IP = 0x01,
+	RSS_HASH_TCP = 0x02,
+	RSS_HASH_TCP_SYN_DIS = 0x03,
+	RSS_HASH_UDP = 0x04,
+	RSS_HASH_L4ETC = 0x05,
+	RSS_HASH_ROCE = 0x06,
+	RSS_L3_BIDI = 0x07,
+	RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+	CQE_TYPE_INVALID = 0x0,
+	CQE_TYPE_RX = 0x2,
+	CQE_TYPE_RX_SPLIT = 0x3,
+	CQE_TYPE_RX_TCP = 0x4,
+	CQE_TYPE_SEND = 0x8,
+	CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+	CQE_SEND_STATUS_GOOD = 0x00,
+	CQE_SEND_STATUS_DESC_FAULT = 0x01,
+	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+	CQE_SEND_STATUS_DATA_FAULT = 0x86,
+	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+	CQE_SEND_STATUS_MEM_FAULT = 0x89,
+	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+	CQE_RX_TCP_END_INVALID_FLAG = 1,
+	CQE_RX_TCP_END_TIMEOUT = 2,
+	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+	CQE_RX_TCP_END_PKT_ERR = 4,
+	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+	CQE_RX_ERRLVL_RE = 0x0,
+	CQE_RX_ERRLVL_L2 = 0x1,
+	CQE_RX_ERRLVL_L3 = 0x2,
+	CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+	CQE_RX_ERR_RE_NONE = 0x0,
+	CQE_RX_ERR_RE_PARTIAL = 0x1,
+	CQE_RX_ERR_RE_JABBER = 0x2,
+	CQE_RX_ERR_RE_FCS = 0x7,
+	CQE_RX_ERR_RE_TERMINATE = 0x9,
+	CQE_RX_ERR_RE_RX_CTL = 0xb,
+	CQE_RX_ERR_PREL2_ERR = 0x1f,
+	CQE_RX_ERR_L2_FRAGMENT = 0x20,
+	CQE_RX_ERR_L2_OVERRUN = 0x21,
+	CQE_RX_ERR_L2_PFCS = 0x22,
+	CQE_RX_ERR_L2_PUNY = 0x23,
+	CQE_RX_ERR_L2_MAL = 0x24,
+	CQE_RX_ERR_L2_OVERSIZE = 0x25,
+	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+	CQE_RX_ERR_L2_LENMISM = 0x27,
+	CQE_RX_ERR_L2_PCLP = 0x28,
+	CQE_RX_ERR_IP_NOT = 0x41,
+	CQE_RX_ERR_IP_CHK = 0x42,
+	CQE_RX_ERR_IP_MAL = 0x43,
+	CQE_RX_ERR_IP_MALD = 0x44,
+	CQE_RX_ERR_IP_HOP = 0x45,
+	CQE_RX_ERR_L3_ICRC = 0x46,
+	CQE_RX_ERR_L3_PCLP = 0x47,
+	CQE_RX_ERR_L4_MAL = 0x61,
+	CQE_RX_ERR_L4_CHK = 0x62,
+	CQE_RX_ERR_UDP_LEN = 0x63,
+	CQE_RX_ERR_L4_PORT = 0x64,
+	CQE_RX_ERR_TCP_FLAG = 0x65,
+	CQE_RX_ERR_TCP_OFFSET = 0x66,
+	CQE_RX_ERR_L4_PCLP = 0x67,
+	CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+enum send_l4_csum_type {
+	SEND_L4_CSUM_DISABLE = 0x00,
+	SEND_L4_CSUM_UDP = 0x01,
+	SEND_L4_CSUM_TCP = 0x02,
+};
+
+enum send_crc_alg {
+	SEND_CRCALG_CRC32 = 0x00,
+	SEND_CRCALG_CRC32C = 0x01,
+	SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+	SEND_LD_TYPE_LDD = 0x00,
+	SEND_LD_TYPE_LDT = 0x01,
+	SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+	SEND_MEMALG_SET = 0x00,
+	SEND_MEMALG_ADD = 0x08,
+	SEND_MEMALG_SUB = 0x09,
+	SEND_MEMALG_ADDLEN = 0x0A,
+	SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+	SEND_MEMDSZ_B64 = 0x00,
+	SEND_MEMDSZ_B32 = 0x01,
+	SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+	SQ_DESC_TYPE_INVALID = 0x00,
+	SQ_DESC_TYPE_HEADER = 0x01,
+	SQ_DESC_TYPE_CRC = 0x02,
+	SQ_DESC_TYPE_IMMEDIATE = 0x03,
+	SQ_DESC_TYPE_GATHER = 0x04,
+	SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+enum l3_type_t {
+	L3_NONE		= 0x00,
+	L3_IPV4		= 0x04,
+	L3_IPV4_OPT	= 0x05,
+	L3_IPV6		= 0x06,
+	L3_IPV6_OPT	= 0x07,
+	L3_ET_STOP	= 0x0D,
+	L3_OTHER	= 0x0E
+};
+
+enum l4_type_t {
+	L4_NONE		= 0x00,
+	L4_IPSEC_ESP	= 0x01,
+	L4_IPFRAG	= 0x02,
+	L4_IPCOMP	= 0x03,
+	L4_TCP		= 0x04,
+	L4_UDP_PASS1	= 0x05,
+	L4_GRE		= 0x07,
+	L4_UDP_PASS2	= 0x08,
+	L4_UDP_GENEVE	= 0x09,
+	L4_UDP_VXLAN	= 0x0A,
+	L4_NVGRE	= 0x0C,
+	L4_OTHER	= 0x0E
+};
+
+enum vlan_strip {
+	NO_STRIP = 0x0,
+	STRIP_FIRST_VLAN = 0x1,
+	STRIP_SECOND_VLAN = 0x2,
+	STRIP_RESERV = 0x3
+};
+
+enum rbdr_state {
+	RBDR_FIFO_STATE_INACTIVE = 0,
+	RBDR_FIFO_STATE_ACTIVE   = 1,
+	RBDR_FIFO_STATE_RESET    = 2,
+	RBDR_FIFO_STATE_FAIL     = 3
+};
+
+enum rq_cache_allocation {
+	RQ_CACHE_ALLOC_OFF      = 0,
+	RQ_CACHE_ALLOC_ALL      = 1,
+	RQ_CACHE_ALLOC_FIRST    = 2,
+	RQ_CACHE_ALLOC_TWO      = 3,
+};
+
+enum cq_rx_errlvl_e {
+	CQ_ERRLVL_MAC,
+	CQ_ERRLVL_L2,
+	CQ_ERRLVL_L3,
+	CQ_ERRLVL_L4,
+};
+
+enum cq_rx_errop_e {
+	CQ_RX_ERROP_RE_NONE = 0x0,
+	CQ_RX_ERROP_RE_PARTIAL = 0x1,
+	CQ_RX_ERROP_RE_JABBER = 0x2,
+	CQ_RX_ERROP_RE_FCS = 0x7,
+	CQ_RX_ERROP_RE_TERMINATE = 0x9,
+	CQ_RX_ERROP_RE_RX_CTL = 0xb,
+	CQ_RX_ERROP_PREL2_ERR = 0x1f,
+	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+	CQ_RX_ERROP_L2_OVERRUN = 0x21,
+	CQ_RX_ERROP_L2_PFCS = 0x22,
+	CQ_RX_ERROP_L2_PUNY = 0x23,
+	CQ_RX_ERROP_L2_MAL = 0x24,
+	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+	CQ_RX_ERROP_L2_LENMISM = 0x27,
+	CQ_RX_ERROP_L2_PCLP = 0x28,
+	CQ_RX_ERROP_IP_NOT = 0x41,
+	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+	CQ_RX_ERROP_IP_MAL = 0x43,
+	CQ_RX_ERROP_IP_MALD = 0x44,
+	CQ_RX_ERROP_IP_HOP = 0x45,
+	CQ_RX_ERROP_L3_ICRC = 0x46,
+	CQ_RX_ERROP_L3_PCLP = 0x47,
+	CQ_RX_ERROP_L4_MAL = 0x61,
+	CQ_RX_ERROP_L4_CHK = 0x62,
+	CQ_RX_ERROP_UDP_LEN = 0x63,
+	CQ_RX_ERROP_L4_PORT = 0x64,
+	CQ_RX_ERROP_TCP_FLAG = 0x65,
+	CQ_RX_ERROP_TCP_OFFSET = 0x66,
+	CQ_RX_ERROP_L4_PCLP = 0x67,
+	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum cq_tx_errop_e {
+	CQ_TX_ERROP_GOOD = 0x0,
+	CQ_TX_ERROP_DESC_FAULT = 0x10,
+	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+	CQ_TX_ERROP_SUBDC_ERR = 0x12,
+	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+	CQ_TX_ERROP_LOCK_VIOL = 0x83,
+	CQ_TX_ERROP_DATA_FAULT = 0x84,
+	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+	CQ_TX_ERROP_MEM_FAULT = 0x87,
+	CQ_TX_ERROP_CK_OVERLAP = 0x88,
+	CQ_TX_ERROP_CK_OFLOW = 0x89,
+	CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+enum rq_sq_stats_reg_offset {
+	RQ_SQ_STATS_OCTS = 0x0,
+	RQ_SQ_STATS_PKTS = 0x1,
+};
+
+enum nic_stat_vnic_rx_e {
+	RX_OCTS = 0,
+	RX_UCAST,
+	RX_BCAST,
+	RX_MCAST,
+	RX_RED,
+	RX_RED_OCTS,
+	RX_ORUN,
+	RX_ORUN_OCTS,
+	RX_FCS,
+	RX_L2ERR,
+	RX_DRP_BCAST,
+	RX_DRP_MCAST,
+	RX_DRP_L3BCAST,
+	RX_DRP_L3MCAST,
+};
+
+enum nic_stat_vnic_tx_e {
+	TX_OCTS = 0,
+	TX_UCAST,
+	TX_BCAST,
+	TX_MCAST,
+	TX_DROP,
+};
+
+#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
+
+typedef uint64_t nicvf_phys_addr_t;
+
+#ifndef __BYTE_ORDER__
+#error __BYTE_ORDER__ not defined
+#endif
+
+/* vNIC HW Structures */
+
+#define NICVF_CQE_RBPTR_WORD         6
+#define NICVF_CQE_RX2_RBPTR_WORD     7
+
+typedef union {
+	uint64_t u64;
+	struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+		uint64_t cqe_type:4;
+		uint64_t stdn_fault:1;
+		uint64_t rsvd0:1;
+		uint64_t rq_qs:7;
+		uint64_t rq_idx:3;
+		uint64_t rsvd1:12;
+		uint64_t rss_alg:4;
+		uint64_t rsvd2:4;
+		uint64_t rb_cnt:4;
+		uint64_t vlan_found:1;
+		uint64_t vlan_stripped:1;
+		uint64_t vlan2_found:1;
+		uint64_t vlan2_stripped:1;
+		uint64_t l4_type:4;
+		uint64_t l3_type:4;
+		uint64_t l2_present:1;
+		uint64_t err_level:3;
+		uint64_t err_opcode:8;
+#else
+		uint64_t err_opcode:8;
+		uint64_t err_level:3;
+		uint64_t l2_present:1;
+		uint64_t l3_type:4;
+		uint64_t l4_type:4;
+		uint64_t vlan2_stripped:1;
+		uint64_t vlan2_found:1;
+		uint64_t vlan_stripped:1;
+		uint64_t vlan_found:1;
+		uint64_t rb_cnt:4;
+		uint64_t rsvd2:4;
+		uint64_t rss_alg:4;
+		uint64_t rsvd1:12;
+		uint64_t rq_idx:3;
+		uint64_t rq_qs:7;
+		uint64_t rsvd0:1;
+		uint64_t stdn_fault:1;
+		uint64_t cqe_type:4;
+#endif
+	};
+} cqe_rx_word0_t;
+
+typedef union {
+	uint64_t u64;
+	struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+		uint64_t pkt_len:16;
+		uint64_t l2_ptr:8;
+		uint64_t l3_ptr:8;
+		uint64_t l4_ptr:8;
+		uint64_t cq_pkt_len:8;
+		uint64_t align_pad:3;
+		uint64_t rsvd3:1;
+		uint64_t chan:12;
+#else
+		uint64_t chan:12;
+		uint64_t rsvd3:1;
+		uint64_t align_pad:3;
+		uint64_t cq_pkt_len:8;
+		uint64_t l4_ptr:8;
+		uint64_t l3_ptr:8;
+		uint64_t l2_ptr:8;
+		uint64_t pkt_len:16;
+#endif
+	};
+} cqe_rx_word1_t;
+
+typedef union {
+	uint64_t u64;
+	struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+		uint64_t rss_tag:32;
+		uint64_t vlan_tci:16;
+		uint64_t vlan_ptr:8;
+		uint64_t vlan2_ptr:8;
+#else
+		uint64_t vlan2_ptr:8;
+		uint64_t vlan_ptr:8;
+		uint64_t vlan_tci:16;
+		uint64_t rss_tag:32;
+#endif
+	};
+} cqe_rx_word2_t;
+
+typedef union {
+	uint64_t u64;
+	struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+		uint16_t rb3_sz;
+		uint16_t rb2_sz;
+		uint16_t rb1_sz;
+		uint16_t rb0_sz;
+#else
+		uint16_t rb0_sz;
+		uint16_t rb1_sz;
+		uint16_t rb2_sz;
+		uint16_t rb3_sz;
+#endif
+	};
+} cqe_rx_word3_t;
+
+typedef union {
+	uint64_t u64;
+	struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+		uint16_t rb7_sz;
+		uint16_t rb6_sz;
+		uint16_t rb5_sz;
+		uint16_t rb4_sz;
+#else
+		uint16_t rb4_sz;
+		uint16_t rb5_sz;
+		uint16_t rb6_sz;
+		uint16_t rb7_sz;
+#endif
+	};
+} cqe_rx_word4_t;
+
+typedef union {
+	uint64_t u64;
+	struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+		uint16_t rb11_sz;
+		uint16_t rb10_sz;
+		uint16_t rb9_sz;
+		uint16_t rb8_sz;
+#else
+		uint16_t rb8_sz;
+		uint16_t rb9_sz;
+		uint16_t rb10_sz;
+		uint16_t rb11_sz;
+#endif
+	};
+} cqe_rx_word5_t;
+
+typedef union {
+	uint64_t u64;
+	struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+		uint64_t vlan_found:1;
+		uint64_t vlan_stripped:1;
+		uint64_t vlan2_found:1;
+		uint64_t vlan2_stripped:1;
+		uint64_t rsvd2:3;
+		uint64_t inner_l2:1;
+		uint64_t inner_l4type:4;
+		uint64_t inner_l3type:4;
+		uint64_t vlan_ptr:8;
+		uint64_t vlan2_ptr:8;
+		uint64_t rsvd1:8;
+		uint64_t rsvd0:8;
+		uint64_t inner_l3ptr:8;
+		uint64_t inner_l4ptr:8;
+#else
+		uint64_t inner_l4ptr:8;
+		uint64_t inner_l3ptr:8;
+		uint64_t rsvd0:8;
+		uint64_t rsvd1:8;
+		uint64_t vlan2_ptr:8;
+		uint64_t vlan_ptr:8;
+		uint64_t inner_l3type:4;
+		uint64_t inner_l4type:4;
+		uint64_t inner_l2:1;
+		uint64_t rsvd2:3;
+		uint64_t vlan2_stripped:1;
+		uint64_t vlan2_found:1;
+		uint64_t vlan_stripped:1;
+		uint64_t vlan_found:1;
+#endif
+	};
+} cqe_rx2_word6_t;
+
+struct cqe_rx_t {
+	cqe_rx_word0_t word0;
+	cqe_rx_word1_t word1;
+	cqe_rx_word2_t word2;
+	cqe_rx_word3_t word3;
+	cqe_rx_word4_t word4;
+	cqe_rx_word5_t word5;
+	cqe_rx2_word6_t word6; /* if NIC_PF_RX_CFG[CQE_RX2_ENA] set */
+};
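+
+/* Parsing sketch (illustrative; 'process' and 'sizes' are hypothetical):
+ * word0.rb_cnt holds the number of receive buffers used by the packet,
+ * words 3-5 carry the per-buffer sizes (rb0_sz..rb11_sz) and the buffer
+ * pointers start at 64-bit word NICVF_CQE_RBPTR_WORD (or
+ * NICVF_CQE_RX2_RBPTR_WORD when NIC_PF_RX_CFG[CQE_RX2_ENA] is set):
+ *
+ *	uint64_t *rbptr = (uint64_t *)cqe_rx + NICVF_CQE_RBPTR_WORD;
+ *	for (i = 0; i < cqe_rx->word0.rb_cnt; i++)
+ *		process(rbptr[i], sizes[i]);
+ */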
+
+struct cqe_rx_tcp_err_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t   cqe_type:4; /* W0 */
+	uint64_t   rsvd0:60;
+
+	uint64_t   rsvd1:4; /* W1 */
+	uint64_t   partial_first:1;
+	uint64_t   rsvd2:27;
+	uint64_t   rbdr_bytes:8;
+	uint64_t   rsvd3:24;
+#else
+	uint64_t   rsvd0:60;
+	uint64_t   cqe_type:4;
+
+	uint64_t   rsvd3:24;
+	uint64_t   rbdr_bytes:8;
+	uint64_t   rsvd2:27;
+	uint64_t   partial_first:1;
+	uint64_t   rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t   cqe_type:4; /* W0 */
+	uint64_t   rsvd0:52;
+	uint64_t   cq_tcp_status:8;
+
+	uint64_t   rsvd1:32; /* W1 */
+	uint64_t   tcp_cntx_bytes:8;
+	uint64_t   rsvd2:8;
+	uint64_t   tcp_err_bytes:16;
+#else
+	uint64_t   cq_tcp_status:8;
+	uint64_t   rsvd0:52;
+	uint64_t   cqe_type:4; /* W0 */
+
+	uint64_t   tcp_err_bytes:16;
+	uint64_t   rsvd2:8;
+	uint64_t   tcp_cntx_bytes:8;
+	uint64_t   rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t   cqe_type:4; /* W0 */
+	uint64_t   rsvd0:4;
+	uint64_t   sqe_ptr:16;
+	uint64_t   rsvd1:4;
+	uint64_t   rsvd2:10;
+	uint64_t   sq_qs:7;
+	uint64_t   sq_idx:3;
+	uint64_t   rsvd3:8;
+	uint64_t   send_status:8;
+
+	uint64_t   ptp_timestamp:64; /* W1 */
+#else
+	uint64_t   send_status:8;
+	uint64_t   rsvd3:8;
+	uint64_t   sq_idx:3;
+	uint64_t   sq_qs:7;
+	uint64_t   rsvd2:10;
+	uint64_t   rsvd1:4;
+	uint64_t   sqe_ptr:16;
+	uint64_t   rsvd0:4;
+	uint64_t   cqe_type:4; /* W0 */
+
+	uint64_t   ptp_timestamp:64;
+#endif
+};
+
+struct cq_entry_type_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t cqe_type:4;
+	uint64_t __pad:60;
+#else
+	uint64_t __pad:60;
+	uint64_t cqe_type:4;
+#endif
+};
+
+union cq_entry_t {
+	uint64_t u[64];
+	struct cq_entry_type_t type;
+	struct cqe_rx_t rx_hdr;
+	struct cqe_rx_tcp_t rx_tcp_hdr;
+	struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+	struct cqe_send_t cqe_send;
+};
+
+NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
+
+struct rbdr_entry_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	union {
+		struct {
+			uint64_t   rsvd0:15;
+			uint64_t   buf_addr:42;
+			uint64_t   cache_align:7;
+		};
+		nicvf_phys_addr_t full_addr;
+	};
+#else
+	union {
+		struct {
+			uint64_t   cache_align:7;
+			uint64_t   buf_addr:42;
+			uint64_t   rsvd0:15;
+		};
+		nicvf_phys_addr_t full_addr;
+	};
+#endif
+};
+
+NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
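+
+/* Fill sketch (assumes 'phys' is a 128-byte aligned buffer IOVA):
+ * buf_addr holds bits <48:7> of the address, so a plain assignment to
+ * the overlaid full_addr works and leaves cache_align zero:
+ *
+ *	entry->full_addr = phys;
+ */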
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t   tcp_pkt_cnt:12;
+	uint64_t   rsvd1:4;
+	uint64_t   align_hdr_bytes:4;
+	uint64_t   align_ptr_bytes:4;
+	uint64_t   ptr_bytes:16;
+	uint64_t   rsvd2:24;
+	uint64_t   cqe_type:4;
+	uint64_t   rsvd0:54;
+	uint64_t   tcp_end_reason:2;
+	uint64_t   tcp_status:4;
+#else
+	uint64_t   tcp_status:4;
+	uint64_t   tcp_end_reason:2;
+	uint64_t   rsvd0:54;
+	uint64_t   cqe_type:4;
+	uint64_t   rsvd2:24;
+	uint64_t   ptr_bytes:16;
+	uint64_t   align_ptr_bytes:4;
+	uint64_t   align_hdr_bytes:4;
+	uint64_t   rsvd1:4;
+	uint64_t   tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+	uint64_t   opaque:32;
+	uint64_t   rss_flow:8;
+	uint64_t   skip_length:6;
+	uint64_t   disable_rss:1;
+	uint64_t   disable_tcp_reassembly:1;
+	uint64_t   nodrop:1;
+	uint64_t   dest_alg:2;
+	uint64_t   rsvd0:2;
+	uint64_t   dest_rq:11;
+};
+
+struct sq_crc_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t    rsvd1:32;
+	uint64_t    crc_ival:32;
+	uint64_t    subdesc_type:4;
+	uint64_t    crc_alg:2;
+	uint64_t    rsvd0:10;
+	uint64_t    crc_insert_pos:16;
+	uint64_t    hdr_start:16;
+	uint64_t    crc_len:16;
+#else
+	uint64_t    crc_len:16;
+	uint64_t    hdr_start:16;
+	uint64_t    crc_insert_pos:16;
+	uint64_t    rsvd0:10;
+	uint64_t    crc_alg:2;
+	uint64_t    subdesc_type:4;
+	uint64_t    crc_ival:32;
+	uint64_t    rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t    subdesc_type:4; /* W0 */
+	uint64_t    ld_type:2;
+	uint64_t    rsvd0:42;
+	uint64_t    size:16;
+
+	uint64_t    rsvd1:15; /* W1 */
+	uint64_t    addr:49;
+#else
+	uint64_t    size:16;
+	uint64_t    rsvd0:42;
+	uint64_t    ld_type:2;
+	uint64_t    subdesc_type:4; /* W0 */
+
+	uint64_t    addr:49;
+	uint64_t    rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t    subdesc_type:4; /* W0 */
+	uint64_t    rsvd0:46;
+	uint64_t    len:14;
+
+	uint64_t    data:64; /* W1 */
+#else
+	uint64_t    len:14;
+	uint64_t    rsvd0:46;
+	uint64_t    subdesc_type:4; /* W0 */
+
+	uint64_t    data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t    subdesc_type:4; /* W0 */
+	uint64_t    mem_alg:4;
+	uint64_t    mem_dsz:2;
+	uint64_t    wmem:1;
+	uint64_t    rsvd0:21;
+	uint64_t    offset:32;
+
+	uint64_t    rsvd1:15; /* W1 */
+	uint64_t    addr:49;
+#else
+	uint64_t    offset:32;
+	uint64_t    rsvd0:21;
+	uint64_t    wmem:1;
+	uint64_t    mem_dsz:2;
+	uint64_t    mem_alg:4;
+	uint64_t    subdesc_type:4; /* W0 */
+
+	uint64_t    addr:49;
+	uint64_t    rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t    subdesc_type:4;
+	uint64_t    tso:1;
+	uint64_t    post_cqe:1; /* Post CQE on no error also */
+	uint64_t    dont_send:1;
+	uint64_t    tstmp:1;
+	uint64_t    subdesc_cnt:8;
+	uint64_t    csum_l4:2;
+	uint64_t    csum_l3:1;
+	uint64_t    csum_inner_l4:2;
+	uint64_t    csum_inner_l3:1;
+	uint64_t    rsvd0:2;
+	uint64_t    l4_offset:8;
+	uint64_t    l3_offset:8;
+	uint64_t    rsvd1:4;
+	uint64_t    tot_len:20; /* W0 */
+
+	uint64_t    rsvd2:24;
+	uint64_t    inner_l4_offset:8;
+	uint64_t    inner_l3_offset:8;
+	uint64_t    tso_start:8;
+	uint64_t    rsvd3:2;
+	uint64_t    tso_max_paysize:14; /* W1 */
+#else
+	uint64_t    tot_len:20;
+	uint64_t    rsvd1:4;
+	uint64_t    l3_offset:8;
+	uint64_t    l4_offset:8;
+	uint64_t    rsvd0:2;
+	uint64_t    csum_inner_l3:1;
+	uint64_t    csum_inner_l4:2;
+	uint64_t    csum_l3:1;
+	uint64_t    csum_l4:2;
+	uint64_t    subdesc_cnt:8;
+	uint64_t    tstmp:1;
+	uint64_t    dont_send:1;
+	uint64_t    post_cqe:1; /* Post CQE on no error also */
+	uint64_t    tso:1;
+	uint64_t    subdesc_type:4; /* W0 */
+
+	uint64_t    tso_max_paysize:14;
+	uint64_t    rsvd3:2;
+	uint64_t    tso_start:8;
+	uint64_t    inner_l3_offset:8;
+	uint64_t    inner_l4_offset:8;
+	uint64_t    rsvd2:24; /* W1 */
+#endif
+};
+
+/* Each sq entry is 128 bits wide */
+union sq_entry_t {
+	uint64_t buff[2];
+	struct sq_hdr_subdesc hdr;
+	struct sq_imm_subdesc imm;
+	struct sq_gather_subdesc gather;
+	struct sq_crc_subdesc crc;
+	struct sq_mem_subdesc mem;
+};
+
+NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
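+
+/* Minimal transmit sketch (illustrative; 'desc', 'pkt_len', 'iova' and
+ * 'qidx' are hypothetical): a single-segment packet takes one header
+ * subdescriptor plus one gather subdescriptor, then the SQ doorbell is
+ * rung with the number of entries posted:
+ *
+ *	desc[0].buff[0] = desc[0].buff[1] = 0;
+ *	desc[0].hdr.subdesc_type = SQ_DESC_TYPE_HEADER;
+ *	desc[0].hdr.subdesc_cnt = 1;
+ *	desc[0].hdr.tot_len = pkt_len;
+ *	desc[1].buff[0] = desc[1].buff[1] = 0;
+ *	desc[1].gather.subdesc_type = SQ_DESC_TYPE_GATHER;
+ *	desc[1].gather.ld_type = NIC_SEND_LD_TYPE_E_LDD;
+ *	desc[1].gather.size = pkt_len;
+ *	desc[1].gather.addr = iova;
+ *	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 2);
+ */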
+
+/* Queue config register formats */
+struct rq_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t reserved_2_63:62;
+	uint64_t ena:1;
+	uint64_t reserved_0:1;
+#else
+	uint64_t reserved_0:1;
+	uint64_t ena:1;
+	uint64_t reserved_2_63:62;
+#endif
+	};
+	uint64_t value;
+}; };
+
+struct cq_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t reserved_43_63:21;
+	uint64_t ena:1;
+	uint64_t reset:1;
+	uint64_t caching:1;
+	uint64_t reserved_35_39:5;
+	uint64_t qsize:3;
+	uint64_t reserved_25_31:7;
+	uint64_t avg_con:9;
+	uint64_t reserved_0_15:16;
+#else
+	uint64_t reserved_0_15:16;
+	uint64_t avg_con:9;
+	uint64_t reserved_25_31:7;
+	uint64_t qsize:3;
+	uint64_t reserved_35_39:5;
+	uint64_t caching:1;
+	uint64_t reset:1;
+	uint64_t ena:1;
+	uint64_t reserved_43_63:21;
+#endif
+	};
+	uint64_t value;
+}; };
+
+struct sq_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t reserved_20_63:44;
+	uint64_t ena:1;
+	uint64_t reserved_18_18:1;
+	uint64_t reset:1;
+	uint64_t ldwb:1;
+	uint64_t reserved_11_15:5;
+	uint64_t qsize:3;
+	uint64_t reserved_3_7:5;
+	uint64_t tstmp_bgx_intf:3;
+#else
+	uint64_t tstmp_bgx_intf:3;
+	uint64_t reserved_3_7:5;
+	uint64_t qsize:3;
+	uint64_t reserved_11_15:5;
+	uint64_t ldwb:1;
+	uint64_t reset:1;
+	uint64_t reserved_18_18:1;
+	uint64_t ena:1;
+	uint64_t reserved_20_63:44;
+#endif
+	};
+	uint64_t value;
+}; };
+
+struct rbdr_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t reserved_45_63:19;
+	uint64_t ena:1;
+	uint64_t reset:1;
+	uint64_t ldwb:1;
+	uint64_t reserved_36_41:6;
+	uint64_t qsize:4;
+	uint64_t reserved_25_31:7;
+	uint64_t avg_con:9;
+	uint64_t reserved_12_15:4;
+	uint64_t lines:12;
+#else
+	uint64_t lines:12;
+	uint64_t reserved_12_15:4;
+	uint64_t avg_con:9;
+	uint64_t reserved_25_31:7;
+	uint64_t qsize:4;
+	uint64_t reserved_36_41:6;
+	uint64_t ldwb:1;
+	uint64_t reset:1;
+	uint64_t ena:1;
+	uint64_t reserved_45_63:19;
+#endif
+	};
+	uint64_t value;
+}; };
+
+struct pf_qs_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t reserved_32_63:32;
+	uint64_t ena:1;
+	uint64_t reserved_27_30:4;
+	uint64_t sq_ins_ena:1;
+	uint64_t sq_ins_pos:6;
+	uint64_t lock_ena:1;
+	uint64_t lock_viol_cqe_ena:1;
+	uint64_t send_tstmp_ena:1;
+	uint64_t be:1;
+	uint64_t reserved_7_15:9;
+	uint64_t vnic:7;
+#else
+	uint64_t vnic:7;
+	uint64_t reserved_7_15:9;
+	uint64_t be:1;
+	uint64_t send_tstmp_ena:1;
+	uint64_t lock_viol_cqe_ena:1;
+	uint64_t lock_ena:1;
+	uint64_t sq_ins_pos:6;
+	uint64_t sq_ins_ena:1;
+	uint64_t reserved_27_30:4;
+	uint64_t ena:1;
+	uint64_t reserved_32_63:32;
+#endif
+	};
+	uint64_t value;
+}; };
+
+struct pf_rq_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t reserved1:1;
+	uint64_t reserved0:34;
+	uint64_t strip_pre_l2:1;
+	uint64_t caching:2;
+	uint64_t cq_qs:7;
+	uint64_t cq_idx:3;
+	uint64_t rbdr_cont_qs:7;
+	uint64_t rbdr_cont_idx:1;
+	uint64_t rbdr_strt_qs:7;
+	uint64_t rbdr_strt_idx:1;
+#else
+	uint64_t rbdr_strt_idx:1;
+	uint64_t rbdr_strt_qs:7;
+	uint64_t rbdr_cont_idx:1;
+	uint64_t rbdr_cont_qs:7;
+	uint64_t cq_idx:3;
+	uint64_t cq_qs:7;
+	uint64_t caching:2;
+	uint64_t strip_pre_l2:1;
+	uint64_t reserved0:34;
+	uint64_t reserved1:1;
+#endif
+	};
+	uint64_t value;
+}; };
+
+struct pf_rq_drop_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	uint64_t rbdr_red:1;
+	uint64_t cq_red:1;
+	uint64_t reserved3:14;
+	uint64_t rbdr_pass:8;
+	uint64_t rbdr_drop:8;
+	uint64_t reserved2:8;
+	uint64_t cq_pass:8;
+	uint64_t cq_drop:8;
+	uint64_t reserved1:8;
+#else
+	uint64_t reserved1:8;
+	uint64_t cq_drop:8;
+	uint64_t cq_pass:8;
+	uint64_t reserved2:8;
+	uint64_t rbdr_drop:8;
+	uint64_t rbdr_pass:8;
+	uint64_t reserved3:14;
+	uint64_t cq_red:1;
+	uint64_t rbdr_red:1;
+#endif
+	};
+	uint64_t value;
+}; };
+
+#endif /* _THUNDERX_NICVF_HW_DEFS_H */
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
new file mode 100644
index 0000000..715c7c3
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -0,0 +1,416 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "nicvf_plat.h"
+
+static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
+	[NIC_MBOX_MSG_INVALID]            = "NIC_MBOX_MSG_INVALID",
+	[NIC_MBOX_MSG_READY]              = "NIC_MBOX_MSG_READY",
+	[NIC_MBOX_MSG_ACK]                = "NIC_MBOX_MSG_ACK",
+	[NIC_MBOX_MSG_NACK]               = "NIC_MBOX_MSG_NACK",
+	[NIC_MBOX_MSG_QS_CFG]             = "NIC_MBOX_MSG_QS_CFG",
+	[NIC_MBOX_MSG_RQ_CFG]             = "NIC_MBOX_MSG_RQ_CFG",
+	[NIC_MBOX_MSG_SQ_CFG]             = "NIC_MBOX_MSG_SQ_CFG",
+	[NIC_MBOX_MSG_RQ_DROP_CFG]        = "NIC_MBOX_MSG_RQ_DROP_CFG",
+	[NIC_MBOX_MSG_SET_MAC]            = "NIC_MBOX_MSG_SET_MAC",
+	[NIC_MBOX_MSG_SET_MAX_FRS]        = "NIC_MBOX_MSG_SET_MAX_FRS",
+	[NIC_MBOX_MSG_CPI_CFG]            = "NIC_MBOX_MSG_CPI_CFG",
+	[NIC_MBOX_MSG_RSS_SIZE]           = "NIC_MBOX_MSG_RSS_SIZE",
+	[NIC_MBOX_MSG_RSS_CFG]            = "NIC_MBOX_MSG_RSS_CFG",
+	[NIC_MBOX_MSG_RSS_CFG_CONT]       = "NIC_MBOX_MSG_RSS_CFG_CONT",
+	[NIC_MBOX_MSG_RQ_BP_CFG]          = "NIC_MBOX_MSG_RQ_BP_CFG",
+	[NIC_MBOX_MSG_RQ_SW_SYNC]         = "NIC_MBOX_MSG_RQ_SW_SYNC",
+	[NIC_MBOX_MSG_BGX_LINK_CHANGE]    = "NIC_MBOX_MSG_BGX_LINK_CHANGE",
+	[NIC_MBOX_MSG_ALLOC_SQS]          = "NIC_MBOX_MSG_ALLOC_SQS",
+	[NIC_MBOX_MSG_LOOPBACK]           = "NIC_MBOX_MSG_LOOPBACK",
+	[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
+	[NIC_MBOX_MSG_CFG_DONE]           = "NIC_MBOX_MSG_CFG_DONE",
+	[NIC_MBOX_MSG_SHUTDOWN]           = "NIC_MBOX_MSG_SHUTDOWN",
+};
+
+static inline const char *
+nicvf_mbox_msg_str(int msg)
+{
+	assert(msg >= 0 && msg < NIC_MBOX_MSG_MAX);
+	/* undefined messages */
+	if (mbox_message[msg] == NULL)
+		msg = 0;
+	return mbox_message[msg];
+}
+
+static inline void
+nicvf_mbox_send_msg_to_pf_raw(struct nicvf *nic, struct nic_mbx *mbx)
+{
+	uint64_t *mbx_data;
+	uint64_t mbx_addr;
+	int i;
+
+	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+	mbx_data = (uint64_t *)mbx;
+	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+		nicvf_reg_write(nic, mbx_addr, *mbx_data);
+		mbx_data++;
+		mbx_addr += sizeof(uint64_t);
+	}
+	nicvf_mbox_log("msg sent %s (VF%d)",
+			nicvf_mbox_msg_str(mbx->msg.msg), nic->vf_id);
+}
+
+static inline void
+nicvf_mbox_send_async_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
+{
+	nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
+	/* Messages without ack are racy! */
+	nicvf_delay_us(1000);
+}
+
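+/* Synchronous mailbox send: write the message to the shared registers,
+ * then poll pf_acked/pf_nacked, which are set from the interrupt path
+ * in nicvf_handle_mbx_intr(). Note the loop waits 1 ms per iteration
+ * while decrementing 'timeout' by 'sleep' (10), so the wall-clock
+ * timeout is NIC_MBOX_MSG_TIMEOUT / 10 ms; the whole exchange is
+ * retried up to 'retry' times before giving up.
+ */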
+static inline int
+nicvf_mbox_send_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
+{
+	long timeout;
+	long sleep = 10;
+	int i, retry = 5;
+
+	for (i = 0; i < retry; i++) {
+		nic->pf_acked = false;
+		nic->pf_nacked = false;
+		nicvf_smp_wmb();
+
+		nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
+		/* Give some time to get PF response */
+		nicvf_delay_us(1000);
+		timeout = NIC_MBOX_MSG_TIMEOUT;
+		while (timeout > 0) {
+			/* Periodic poll happens from nicvf_interrupt() */
+			nicvf_smp_rmb();
+
+			if (nic->pf_nacked)
+				return -EINVAL;
+			if (nic->pf_acked)
+				return 0;
+
+			nicvf_delay_us(1000);
+			timeout -= sleep;
+		}
+		nicvf_log_error("PF didn't ack to msg 0x%02x %s VF%d (%d/%d)",
+				mbx->msg.msg, nicvf_mbox_msg_str(mbx->msg.msg),
+				nic->vf_id, i, retry);
+	}
+	return -EBUSY;
+}
+
+int
+nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+	struct nic_mbx mbx;
+	uint64_t *mbx_data = (uint64_t *)&mbx;
+	uint64_t mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+	size_t i;
+
+	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+		*mbx_data = nicvf_reg_read(nic, mbx_addr);
+		mbx_data++;
+		mbx_addr += sizeof(uint64_t);
+	}
+
+	/* Overwrite the message so we won't receive it again */
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, 0x0);
+
+	nicvf_mbox_log("msg received id=0x%hhx %s (VF%d)", mbx.msg.msg,
+			nicvf_mbox_msg_str(mbx.msg.msg), nic->vf_id);
+
+	switch (mbx.msg.msg) {
+	case NIC_MBOX_MSG_READY:
+		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+		nic->node = mbx.nic_cfg.node_id;
+		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
+		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
+		ether_addr_copy((struct ether_addr *)mbx.nic_cfg.mac_addr,
+				(struct ether_addr *)nic->mac_addr);
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_ACK:
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_NACK:
+		nic->pf_nacked = true;
+		break;
+	case NIC_MBOX_MSG_RSS_SIZE:
+		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+		nic->link_up = mbx.link_status.link_up;
+		nic->duplex = mbx.link_status.duplex;
+		nic->speed = mbx.link_status.speed;
+		nic->pf_acked = true;
+		break;
+	default:
+		nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s",
+				mbx.msg.msg, nicvf_mbox_msg_str(mbx.msg.msg));
+		break;
+	}
+	nicvf_smp_wmb();
+
+	return mbx.msg.msg;
+}
+
+/*
+ * Checks if the VF is able to communicate with the PF
+ * and also gets the VNIC number this VF is associated with.
+ */
+int
+nicvf_mbox_check_pf_ready(struct nicvf *nic)
+{
+	struct nic_mbx mbx = { .msg = {.msg = NIC_MBOX_MSG_READY} };
+
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_set_mac_addr(struct nicvf *nic,
+			const uint8_t mac[NICVF_MAC_ADDR_SIZE])
+{
+	struct nic_mbx mbx = { .msg = {0} };
+	int i;
+
+	mbx.msg.msg = NIC_MBOX_MSG_SET_MAC;
+	mbx.mac.vf_id = nic->vf_id;
+	for (i = 0; i < NICVF_MAC_ADDR_SIZE; i++)
+		mbx.mac.mac_addr[i] = mac[i];
+
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.msg.msg = NIC_MBOX_MSG_CPI_CFG;
+	mbx.cpi_cfg.vf_id = nic->vf_id;
+	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+	mbx.cpi_cfg.rq_cnt = qcnt;
+
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_get_rss_size(struct nicvf *nic)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.msg.msg = NIC_MBOX_MSG_RSS_SIZE;
+	mbx.rss_size.vf_id = nic->vf_id;
+
+	/* Result will be stored in nic->rss_info.rss_size */
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
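+/* The indirection table is pushed in chunks of
+ * RSS_IND_TBL_LEN_PER_MBX_MSG (8) entries per mailbox message; e.g. a
+ * 128-entry table takes 16 messages, the first NIC_MBOX_MSG_RSS_CFG and
+ * the rest NIC_MBOX_MSG_RSS_CFG_CONT with an increasing tbl_offset.
+ */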
+int
+nicvf_mbox_config_rss(struct nicvf *nic)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+	struct nicvf_rss_reta_info *rss = &nic->rss_info;
+	size_t tot_len = rss->rss_size;
+	size_t cur_len;
+	size_t cur_idx = 0;
+	size_t i;
+
+	mbx.rss_cfg.vf_id = nic->vf_id;
+	mbx.rss_cfg.hash_bits = rss->hash_bits;
+	mbx.rss_cfg.tbl_len = 0;
+	mbx.rss_cfg.tbl_offset = 0;
+
+	while (cur_idx < tot_len) {
+		cur_len = nicvf_min(tot_len - cur_idx,
+				(size_t)RSS_IND_TBL_LEN_PER_MBX_MSG);
+		mbx.msg.msg = (cur_idx > 0) ?
+			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+		mbx.rss_cfg.tbl_offset = cur_idx;
+		mbx.rss_cfg.tbl_len = cur_len;
+		for (i = 0; i < cur_len; i++)
+			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[cur_idx++];
+
+		if (nicvf_mbox_send_msg_to_pf(nic, &mbx))
+			return NICVF_ERR_RSS_TBL_UPDATE;
+	}
+
+	return 0;
+}
+
+int
+nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
+		     struct pf_rq_cfg *pf_rq_cfg)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.msg.msg = NIC_MBOX_MSG_RQ_CFG;
+	mbx.rq.qs_num = nic->vf_id;
+	mbx.rq.rq_num = qidx;
+	mbx.rq.cfg = pf_rq_cfg->value;
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.msg.msg = NIC_MBOX_MSG_SQ_CFG;
+	mbx.sq.qs_num = nic->vf_id;
+	mbx.sq.sq_num = qidx;
+	mbx.sq.sqs_mode = nic->sqs_mode;
+	mbx.sq.cfg = (nic->vf_id << 3) | qidx;
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	qs_cfg->be = 1;
+#endif
+	/* Send a mailbox msg to PF to config Qset */
+	mbx.msg.msg = NIC_MBOX_MSG_QS_CFG;
+	mbx.qs.num = nic->vf_id;
+	mbx.qs.cfg = qs_cfg->value;
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+	struct pf_rq_drop_cfg *drop_cfg;
+
+	/* Enable CQ drop to reserve sufficient CQEs for all tx packets */
+	mbx.msg.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+	mbx.rq.qs_num = nic->vf_id;
+	mbx.rq.rq_num = qidx;
+	drop_cfg = (struct pf_rq_drop_cfg *)&mbx.rq.cfg;
+	drop_cfg->value = 0;
+	if (enable) {
+		drop_cfg->cq_red = 1;
+		drop_cfg->cq_drop = 2;
+	}
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.msg.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+	mbx.frs.max_frs = mtu;
+	mbx.frs.vf_id = nic->vf_id;
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_sync(struct nicvf *nic)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	/* Make sure all packets in the pipeline are written back into mem */
+	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+	mbx.rq.cfg = 0;
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.msg.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+	mbx.rq.qs_num = nic->vf_id;
+	mbx.rq.rq_num = qidx;
+	mbx.rq.cfg = 0;
+	if (enable)
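+		/* bit 63 enables RBDR backpressure, bit 62 CQ
+		 * backpressure; the low bits carry the backpressure
+		 * identifier (here vf_id)
+		 */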
+		mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (nic->vf_id << 0);
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_loopback_config(struct nicvf *nic, bool enable)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
+	mbx.lbk.vf_id = nic->vf_id;
+	mbx.lbk.enable = enable;
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
+			       uint8_t tx_stat_mask, uint16_t rq_stat_mask,
+			       uint16_t sq_stat_mask)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+	mbx.reset_stat.rx_stat_mask = rx_stat_mask;
+	mbx.reset_stat.tx_stat_mask = tx_stat_mask;
+	mbx.reset_stat.rq_stat_mask = rq_stat_mask;
+	mbx.reset_stat.sq_stat_mask = sq_stat_mask;
+	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+void
+nicvf_mbox_shutdown(struct nicvf *nic)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+	nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+void
+nicvf_mbox_cfg_done(struct nicvf *nic)
+{
+	struct nic_mbx mbx = { .msg = { 0 } };
+
+	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+	nicvf_mbox_send_async_msg_to_pf(nic, &mbx);
+}
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
new file mode 100644
index 0000000..7c0c6a9
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -0,0 +1,232 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __THUNDERX_NICVF_MBOX__
+#define __THUNDERX_NICVF_MBOX__
+
+#include <stdint.h>
+
+#include "nicvf_plat.h"
+
+/* PF <--> VF mailbox communication
+ * Two 64-bit registers are shared between the PF and each VF;
+ * writing into the second register signals end of message.
+ */
+
+#define	NIC_PF_VF_MAILBOX_SIZE		2
+#define	NIC_MBOX_MSG_TIMEOUT		2000	/* ms */
+
+/* Mailbox message types */
+#define	NIC_MBOX_MSG_INVALID		0x00	/* Invalid message */
+#define	NIC_MBOX_MSG_READY		0x01	/* Is PF ready to rcv msgs */
+#define	NIC_MBOX_MSG_ACK		0x02	/* ACK the message received */
+#define	NIC_MBOX_MSG_NACK		0x03	/* NACK the message received */
+#define	NIC_MBOX_MSG_QS_CFG		0x04	/* Configure Qset */
+#define	NIC_MBOX_MSG_RQ_CFG		0x05	/* Configure receive queue */
+#define	NIC_MBOX_MSG_SQ_CFG		0x06	/* Configure Send queue */
+#define	NIC_MBOX_MSG_RQ_DROP_CFG	0x07	/* Configure receive queue */
+#define	NIC_MBOX_MSG_SET_MAC		0x08	/* Add MAC ID to DMAC filter */
+#define	NIC_MBOX_MSG_SET_MAX_FRS	0x09	/* Set max frame size */
+#define	NIC_MBOX_MSG_CPI_CFG		0x0A	/* Config CPI, RSSI */
+#define	NIC_MBOX_MSG_RSS_SIZE		0x0B	/* Get RSS indir_tbl size */
+#define	NIC_MBOX_MSG_RSS_CFG		0x0C	/* Config RSS table */
+#define	NIC_MBOX_MSG_RSS_CFG_CONT	0x0D	/* RSS config continuation */
+#define	NIC_MBOX_MSG_RQ_BP_CFG		0x0E	/* RQ backpressure config */
+#define	NIC_MBOX_MSG_RQ_SW_SYNC		0x0F	/* Flush inflight pkts to RQ */
+#define	NIC_MBOX_MSG_BGX_LINK_CHANGE	0x11	/* BGX:LMAC link status */
+#define	NIC_MBOX_MSG_ALLOC_SQS		0x12	/* Allocate secondary Qset */
+#define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
+#define	NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17	/* Reset statistics counters */
+#define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
+#define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
+#define	NIC_MBOX_MSG_MAX		0x100	/* Maximum number of messages */
+
+/* Get vNIC VF configuration */
+struct nic_cfg_msg {
+	uint8_t    msg;
+	uint8_t    vf_id;
+	uint8_t    node_id;
+	bool	   tns_mode:1;
+	bool	   sqs_mode:1;
+	bool	   loopback_supported:1;
+	uint8_t    mac_addr[NICVF_MAC_ADDR_SIZE];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+	uint8_t    msg;
+	uint8_t    num;
+	uint8_t    sqs_count;
+	uint64_t   cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+	uint8_t    msg;
+	uint8_t    qs_num;
+	uint8_t    rq_num;
+	uint64_t   cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+	uint8_t    msg;
+	uint8_t    qs_num;
+	uint8_t    sq_num;
+	bool       sqs_mode;
+	uint64_t   cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+	uint8_t    msg;
+	uint8_t    vf_id;
+	uint8_t    mac_addr[NICVF_MAC_ADDR_SIZE];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+	uint8_t    msg;
+	uint8_t    vf_id;
+	uint16_t   max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+	uint8_t    msg;
+	uint8_t    vf_id;
+	uint8_t    rq_cnt;
+	uint8_t    cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+	uint8_t    msg;
+	uint8_t    vf_id;
+	uint16_t   ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+	uint8_t    msg;
+	uint8_t    vf_id;
+	uint8_t    hash_bits;
+	uint8_t    tbl_len;
+	uint8_t    tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG	8
+	uint8_t    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+	uint8_t    msg;
+	uint8_t    link_up;
+	uint8_t    duplex;
+	uint32_t   speed;
+};
+
+/* Set interface in loopback mode */
+struct set_loopback {
+	uint8_t    msg;
+	uint8_t    vf_id;
+	bool	   enable;
+};
+
+/* Reset statistics counters */
+struct reset_stat_cfg {
+	uint8_t    msg;
+	/* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+	uint16_t   rx_stat_mask;
+	/* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+	uint8_t    tx_stat_mask;
+	/* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+	 * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+	 * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+	 * ..
+	 * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+	 * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+	 */
+	uint16_t   rq_stat_mask;
+	/* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+	 * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+	 * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+	 * ..
+	 * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+	 * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+	 */
+	uint16_t   sq_stat_mask;
+};
+
+struct nic_mbx {
+/* 128 bit shared memory between PF and each VF */
+union {
+	struct { uint8_t msg; }	msg;
+	struct nic_cfg_msg	nic_cfg;
+	struct qs_cfg_msg	qs;
+	struct rq_cfg_msg	rq;
+	struct sq_cfg_msg	sq;
+	struct set_mac_msg	mac;
+	struct set_frs_msg	frs;
+	struct cpi_cfg_msg	cpi_cfg;
+	struct rss_sz_msg	rss_size;
+	struct rss_cfg_msg	rss_cfg;
+	struct bgx_link_status  link_status;
+	struct set_loopback	lbk;
+	struct reset_stat_cfg	reset_stat;
+};
+};
+
+NICVF_STATIC_ASSERT(sizeof(struct nic_mbx) <= 16);
+
+int nicvf_handle_mbx_intr(struct nicvf *nic);
+int nicvf_mbox_check_pf_ready(struct nicvf *nic);
+int nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg);
+int nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
+			 struct pf_rq_cfg *pf_rq_cfg);
+int nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx);
+int nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable);
+int nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable);
+int nicvf_mbox_set_mac_addr(struct nicvf *nic,
+			    const uint8_t mac[NICVF_MAC_ADDR_SIZE]);
+int nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt);
+int nicvf_mbox_get_rss_size(struct nicvf *nic);
+int nicvf_mbox_config_rss(struct nicvf *nic);
+int nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu);
+int nicvf_mbox_rq_sync(struct nicvf *nic);
+int nicvf_mbox_loopback_config(struct nicvf *nic, bool enable);
+int nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
+	uint8_t tx_stat_mask, uint16_t rq_stat_mask, uint16_t sq_stat_mask);
+void nicvf_mbox_shutdown(struct nicvf *nic);
+void nicvf_mbox_cfg_done(struct nicvf *nic);
+
+#endif /* __THUNDERX_NICVF_MBOX__ */
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
new file mode 100644
index 0000000..83c1844
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -0,0 +1,132 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _THUNDERX_NICVF_PLAT_H
+#define _THUNDERX_NICVF_PLAT_H
+
+/* Platform/OS/arch specific abstractions */
+
+/* log */
+#include <rte_log.h>
+#include "../nicvf_logs.h"
+
+#define nicvf_log_error(s, ...) PMD_DRV_LOG(ERR, s, ##__VA_ARGS__)
+
+#define nicvf_log_debug(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#define nicvf_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#define nicvf_log(s, ...) fprintf(stderr, s, ##__VA_ARGS__)
+
+/* delay */
+#include <rte_cycles.h>
+#define nicvf_delay_us(x) rte_delay_us(x)
+
+/* barrier */
+#include <rte_atomic.h>
+#define nicvf_smp_wmb() rte_smp_wmb()
+#define nicvf_smp_rmb() rte_smp_rmb()
+
+/* utils */
+#include <rte_common.h>
+#define nicvf_min(x, y) RTE_MIN(x, y)
+
+/* byte order */
+#include <rte_byteorder.h>
+#define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x)
+#define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x)
+
+/* Constants */
+#include <rte_ether.h>
+#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define nicvf_prefetch_store_keep(_ptr) ({\
+	asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
+
+static inline void __attribute__((always_inline))
+nicvf_addr_write(uintptr_t addr, uint64_t val)
+{
+	asm volatile(
+		    "str %x[val], [%x[addr]]"
+		    :
+		    : [val] "r" (val), [addr] "r" (addr));
+}
+
+static inline uint64_t __attribute__((always_inline))
+nicvf_addr_read(uintptr_t addr)
+{
+	uint64_t val;
+
+	asm volatile(
+		    "ldr %x[val], [%x[addr]]"
+		    : [val] "=r" (val)
+		    : [addr] "r" (addr));
+	return val;
+}
+
+#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({		\
+			asm volatile(			\
+			"ldp %x[x1], %x[x0], [%x[p1]]"	\
+			: [x1]"=r"(reg1), [x0]"=r"(reg2)\
+			: [p1]"r"(addr)			\
+			); })
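+
+/* Usage sketch (illustrative; 'cqe' is a hypothetical uint64_t pointer):
+ * a single ldp fetches two adjacent 64-bit words at once, i.e.
+ *
+ *	uint64_t w0, w1;
+ *	NICVF_LOAD_PAIR(w0, w1, cqe);	w0 = cqe[0], w1 = cqe[1]
+ */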
+
+#else /* fallback implementations for non-arm64 architectures */
+
+#define nicvf_prefetch_store_keep(_ptr) do {} while (0)
+
+static inline void __attribute__((always_inline))
+nicvf_addr_write(uintptr_t addr, uint64_t val)
+{
+	*(volatile uint64_t *)addr = val;
+}
+
+static inline uint64_t __attribute__((always_inline))
+nicvf_addr_read(uintptr_t addr)
+{
+	return	*(volatile uint64_t *)addr;
+}
+
+#define NICVF_LOAD_PAIR(reg1, reg2, addr)		\
+do {							\
+	reg1 = nicvf_addr_read((uintptr_t)addr);	\
+	reg2 = nicvf_addr_read((uintptr_t)addr + 8);	\
+} while (0)
+
+#endif
+
+#include "nicvf_hw.h"
+#include "nicvf_mbox.h"
+
+#endif /* _THUNDERX_NICVF_PLAT_H */