[dpdk-dev] [PATCH 07/11] net/vhostpci: get remote memory region and vring info
Zhiyong Yang
zhiyong.yang at intel.com
Thu Nov 30 10:46:53 CET 2017
The link-up status change is used to signal that the remote memory regions
and vring info are ready, so the vhostpci PMD can retrieve them.
Signed-off-by: Zhiyong Yang <zhiyong.yang at intel.com>
---
drivers/net/vhostpci/vhostpci_ethdev.c | 81 ++++++++++++++++++++++++++++++++++
1 file changed, 81 insertions(+)
diff --git a/drivers/net/vhostpci/vhostpci_ethdev.c b/drivers/net/vhostpci/vhostpci_ethdev.c
index 76353930a..0582f73b7 100644
--- a/drivers/net/vhostpci/vhostpci_ethdev.c
+++ b/drivers/net/vhostpci/vhostpci_ethdev.c
@@ -73,6 +73,9 @@ static int
vhostpci_dev_atomic_write_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *link);
+static inline uint64_t
+remote_gpa_to_vva(struct vhostpci_net *vpnet, uint64_t remote_gpa);
+
static int
vhostpci_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc __rte_unused,
@@ -105,6 +108,9 @@ vhostpci_dev_close(struct rte_eth_dev *dev);
static void
vhostpci_dev_stop(struct rte_eth_dev *dev);
+static int
+vhostpci_get_remote_mem(struct rte_eth_dev *dev);
+
static const struct eth_dev_ops vhostpci_eth_dev_ops = {
.dev_start = vhostpci_dev_start,
.dev_stop = vhostpci_dev_stop,
@@ -335,6 +341,71 @@ vhostpci_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Translate a remote guest-physical address (GPA) into a local virtual
+ * address by finding the remote memory region that contains it and
+ * adding that region's offset within the locally mapped memory BAR.
+ *
+ * Returns 0 when the address falls outside every known remote region;
+ * callers must treat 0 as "no mapping".
+ *
+ * Fix: the lower-bound check previously used '>', which rejected an
+ * address exactly equal to a region's start — the first byte of every
+ * region failed to translate. Use '>=' for the inclusive lower bound
+ * (upper bound stays exclusive), matching the usual vhost GPA->VVA
+ * containment test.
+ */
+static inline uint64_t
+remote_gpa_to_vva(struct vhostpci_net *vpnet, uint64_t remote_gpa)
+{
+	uint64_t mem_base = vpnet->mem_base;
+	uint32_t i, nregions = vpnet->mem.nregions;
+	struct vhostpci_mem_region *regions = vpnet->mem.regions;
+
+	for (i = 0; i < nregions; i++) {
+		if (remote_gpa >= regions[i].start &&
+		    remote_gpa < regions[i].end)
+			return (remote_gpa - regions[i].start
+				+ regions[i].offset + mem_base);
+	}
+
+	return 0;
+}
+
+/*
+ * Read the remote peer's memory-region table and vring layout out of the
+ * metadata area exposed through the device's REMOTE_MEM_BAR_ID PCI BAR,
+ * and populate the driver's vhostpci_net state from it.
+ *
+ * Layout assumed here (from the code below — confirm against the
+ * vhostpci device spec): the BAR starts with a vpnet_metadata header of
+ * METADATA_SIZE bytes, followed immediately by the remote guest's memory
+ * regions packed back-to-back in table order.
+ *
+ * Always returns 0; callers currently have no failure to handle.
+ */
+static int
+vhostpci_get_remote_mem(struct rte_eth_dev *dev)
+{
+	struct vpnet_metadata *metadata;
+	struct rte_mem_resource *mem_resource;
+	struct rte_pci_device *pci_device = RTE_ETH_DEV_TO_PCI(dev);
+	struct vhostpci_mem_region *regions;
+	struct vpnet_remote_vq *vq;
+	struct vhostpci_net *vpnet;
+	struct vhostpci_virtqueue *virtqueue[VHOSTPCI_MAX_QUEUE_PAIRS * 2];
+	struct vhostpci_hw *hw = dev->data->dev_private;
+	uint64_t offset = 0;
+	uint32_t i;
+
+	vpnet = hw->vpnet;
+	mem_resource = pci_device->mem_resource;
+	/* The metadata header sits at the very start of the remote-mem BAR. */
+	metadata = mem_resource[REMOTE_MEM_BAR_ID].addr;
+
+	/* Remote guest memory is mapped right after the metadata header. */
+	vpnet->mem_base = (uint64_t)metadata + METADATA_SIZE;
+	vpnet->mem.nregions = metadata->nregions;
+	vpnet->nr_vring = metadata->nvqs;
+	regions = vpnet->mem.regions;
+
+	/*
+	 * Record each remote region's GPA range and its cumulative offset
+	 * into the mapped BAR; remote_gpa_to_vva() uses start/end/offset
+	 * to translate remote GPAs to local virtual addresses.
+	 */
+	for (i = 0; i < metadata->nregions; i++) {
+		regions[i].guest_phys_addr = metadata->mem[i].gpa;
+		regions[i].size = metadata->mem[i].size;
+		regions[i].start = metadata->mem[i].gpa;
+		regions[i].end = metadata->mem[i].gpa + metadata->mem[i].size;
+		regions[i].offset = offset;
+		offset += regions[i].size;
+	}
+
+	/*
+	 * Resolve each remote vring's desc/avail/used GPAs to local virtual
+	 * addresses. NOTE(review): remote_gpa_to_vva() returns 0 for an
+	 * unmapped GPA, and that value is stored unchecked — a bogus
+	 * metadata entry would leave a NULL ring pointer here.
+	 */
+	vq = metadata->vq;
+	for (i = 0; i < vpnet->nr_vring; i++) {
+		virtqueue[i] = vpnet->virtqueue[i];
+		virtqueue[i]->desc = (struct vring_desc *)remote_gpa_to_vva(vpnet, vq[i].desc_gpa);
+		virtqueue[i]->avail = (struct vring_avail *)remote_gpa_to_vva(vpnet, vq[i].avail_gpa);
+		virtqueue[i]->used = (struct vring_used *)remote_gpa_to_vva(vpnet, vq[i].used_gpa);
+		virtqueue[i]->last_avail_idx = vq[i].last_avail_idx;
+		virtqueue[i]->enabled = vq[i].vring_enabled;
+		/* Local used index starts fresh; ring size is fixed. */
+		virtqueue[i]->last_used_idx = 0;
+		virtqueue[i]->size = VHOSTPCI_NUM_DESCRIPTORS;
+	}
+
+	return 0;
+}
+
static int
vhostpci_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete __rte_unused)
@@ -362,9 +433,19 @@ vhostpci_dev_link_update(struct rte_eth_dev *dev,
dev->data->port_id);
} else {
+ int ret;
+
link.link_status = ETH_LINK_UP;
PMD_INIT_LOG(DEBUG, "Port %d is up",
dev->data->port_id);
+
+ /* get the remote guest memory region and vring info */
+ vhostpci_get_remote_mem(dev);
+
+ ret = vhostpci_init_device(dev,
+ VHOSTPCI_PMD_DEFAULT_GUEST_FEATURES);
+ if (ret < 0)
+ return ret;
}
}
--
2.13.3
More information about the dev
mailing list