[18.11] vfio: fix DMA mapping of externally allocated heaps

Message ID b11cfd686662352931de545be84e9a5b390df05c.1572973899.git.anatoly.burakov@intel.com (mailing list archive)
State Not Applicable, archived
Delegated to: David Marchand
Series [18.11] vfio: fix DMA mapping of externally allocated heaps

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  fail      apply issues

Commit Message

Burakov, Anatoly Nov. 5, 2019, 5:12 p.m. UTC
Currently, externally created heaps are supposed to be automatically
mapped for VFIO DMA by EAL; however, this only happens if, at the time
of heap creation, VFIO is initialized and has at least one device
available. If no devices are available at the time of heap creation (or
if devices were available but have since been hot-unplugged, thereby
dropping all VFIO container mappings), the VFIO mapping code skips over
externally allocated heaps.
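
For context, a minimal sketch of the external heap flow in question,
using the experimental rte_malloc_heap_* APIs available in 18.11; the
heap name, addresses and sizes are illustrative placeholders:

	#include <rte_malloc.h>
	#include <rte_memory.h>

	/* illustrative sketch: create an external heap and add user
	 * memory to it. Adding memory is the point at which EAL is
	 * supposed to set up VFIO DMA mappings automatically; with the
	 * bug described above, that step silently does nothing if no
	 * VFIO device is attached at this time.
	 */
	static int
	setup_external_heap(void *va, size_t len, rte_iova_t iova[],
			unsigned int n_pages, size_t page_sz)
	{
		if (rte_malloc_heap_create("ext_heap") != 0)
			return -1;
		return rte_malloc_heap_memory_add("ext_heap", va, len,
				iova, n_pages, page_sz);
	}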

The fix is to stop skipping external segments unconditionally, and
instead check whether each segment belongs to an internal or an
external heap. This way, we still skip segments that are supposed to be
mapped through the user maps, but we now map all heap segments.
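
The "user maps" referred to above are DMA mappings that the application
sets up itself through the VFIO container API (experimental in 18.11);
a hedged sketch, with placeholder arguments:

	#include <stdint.h>
	#include <rte_vfio.h>

	/* memory that is not attached to any malloc heap is mapped for
	 * DMA explicitly by the application; such mappings are tracked
	 * as user maps and remain skipped by the memseg walks in the
	 * patch below.
	 */
	static int
	user_dma_map(int container_fd, void *va, uint64_t iova,
			uint64_t len)
	{
		return rte_vfio_container_dma_map(container_fd,
				(uint64_t)(uintptr_t)va, iova, len);
	}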

Fixes: 0f526d674f8e ("malloc: separate creating memseg list and malloc heap")
Cc: stable@dpdk.org

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/librte_eal/linuxapp/eal/eal_vfio.c | 61 ++++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 4 deletions(-)
  

Patch

diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 830b320d70..463ed9999a 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -87,6 +87,35 @@  static const struct vfio_iommu_type iommu_types[] = {
 	},
 };
 
+static bool
+is_heap_memseg_list(const struct rte_memseg_list *msl)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct malloc_heap *heap;
+	int i;
+
+	/* internal memsegs are always heap memsegs */
+	if (!msl->external)
+		return true;
+
+	for (i = 0; i < RTE_MAX_HEAPS; i++) {
+		int socket_id;
+
+		heap = &mcfg->malloc_heaps[i];
+		if (heap->total_size == 0)
+			continue;
+
+		socket_id = heap->socket_id;
+
+		/* if socket ID of the heap matches that of memseg list, we
+		 * found the heap this segment belongs to.
+		 */
+		if (msl->socket_id == socket_id)
+			return true;
+	}
+	return false;
+}
+
 static int
 is_null_map(const struct user_mem_map *map)
 {
@@ -1238,6 +1267,7 @@  type1_map_contig(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 {
 	int *vfio_container_fd = arg;
 
+	/* skip over all externally allocated segments */
 	if (msl->external)
 		return 0;
 
@@ -1251,7 +1281,15 @@  type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 {
 	int *vfio_container_fd = arg;
 
-	if (msl->external)
+	/* internal memsegs have already been mapped if IOVA as VA mode */
+	if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
+		return 0;
+
+	if (!is_heap_memseg_list(msl))
+		return 0;
+
+	/* external segments may not have valid IOVA addresses */
+	if (ms->iova == RTE_BAD_IOVA)
 		return 0;
 
 	return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
@@ -1302,12 +1340,19 @@  vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
 static int
 vfio_type1_dma_map(int vfio_container_fd)
 {
+	int ret;
+
 	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
 		/* with IOVA as VA mode, we can get away with mapping contiguous
 		 * chunks rather than going page-by-page.
 		 */
-		return rte_memseg_contig_walk(type1_map_contig,
+		ret = rte_memseg_contig_walk(type1_map_contig,
 				&vfio_container_fd);
+		if (ret != 0)
+			return ret;
+		/* fall through to regular memseg walk because we also need to
+		 * map external heaps.
+		 */
 	}
 	return rte_memseg_walk(type1_map, &vfio_container_fd);
 }
@@ -1382,7 +1427,11 @@  vfio_spapr_map_walk(const struct rte_memseg_list *msl,
 {
 	int *vfio_container_fd = arg;
 
-	if (msl->external)
+	if (!is_heap_memseg_list(msl))
+		return 0;
+
+	/* external segments may not have valid IOVA addresses */
+	if (ms->iova == RTE_BAD_IOVA)
 		return 0;
 
 	return vfio_spapr_dma_do_map(*vfio_container_fd, ms->addr_64, ms->iova,
@@ -1400,7 +1449,11 @@  vfio_spapr_window_size_walk(const struct rte_memseg_list *msl,
 	struct spapr_walk_param *param = arg;
 	uint64_t max = ms->iova + ms->len;
 
-	if (msl->external)
+	if (!is_heap_memseg_list(msl))
+		return 0;
+
+	/* external segments may not have valid IOVA addresses */
+	if (ms->iova == RTE_BAD_IOVA)
 		return 0;
 
 	if (max > param->window_size) {
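
For completeness, a hedged sketch of the sequence that triggered the
bug; the heap name and PCI address are placeholders, and
rte_eal_hotplug_add() is only one of several ways to attach a device:

	#include <rte_dev.h>
	#include <rte_malloc.h>

	static int
	repro_sequence(void)
	{
		/* 1. create an external heap while no VFIO device is
		 * attached; the automatic DMA mapping has nothing to
		 * map into yet.
		 */
		if (rte_malloc_heap_create("ext_heap") != 0)
			return -1;
		/* ... rte_malloc_heap_memory_add() as in the earlier
		 * sketch ...
		 */

		/* 2. hot-plug a VFIO-bound device; when its container
		 * is populated, the memseg walks previously skipped all
		 * external segments, leaving the heap unmapped for DMA.
		 */
		return rte_eal_hotplug_add("pci", "0000:00:01.0", "");
	}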