[PATCH v3] vhost: exclude VM hugepages from coredumps

Mike Pattrick mkp at redhat.com
Wed Dec 7 17:54:08 CET 2022


Currently, if an application using the vhost library wants to include
its shared hugepages in coredumps, the coredump will be larger than
expected and will include unneeded virtual machine memory.

This patch marks all vhost hugepages as DONTDUMP, except for select
pages used by DPDK.
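
The mechanism relied on here is madvise(2) with MADV_DONTDUMP /
MADV_DODUMP. As a minimal standalone sketch (not part of this patch,
for illustration only), excluding a mapping from a process coredump
on Linux looks like this:

#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;	/* one 2 MB hugepage-sized area */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

#ifdef MADV_DONTDUMP
	/* Exclude this mapping from any coredump of the process. */
	if (madvise(p, len, MADV_DONTDUMP) == -1)
		printf("could not set coredump preference (%s)\n",
			strerror(errno));
#endif

	munmap(p, len);
	return 0;
}

In the patch below the same call is wrapped in a mem_set_dump()
helper: guest memory regions are marked DONTDUMP when they are
mmapped, and only the ring and IOTLB areas that DPDK actually
accesses are re-marked DODUMP.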

Signed-off-by: Mike Pattrick <mkp at redhat.com>

---
v2:
* Removed warning on unsupported platforms

v3:
* Removed pointer warning on 32-bit platforms

---
 lib/vhost/iotlb.c      |  5 +++++
 lib/vhost/vhost.h      | 12 ++++++++++++
 lib/vhost/vhost_user.c | 10 ++++++++++
 3 files changed, 27 insertions(+)

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 6a729e8804..a0b8fd7302 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -149,6 +149,7 @@ vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
 	rte_rwlock_write_lock(&vq->iotlb_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
+		mem_set_dump((void *)(uintptr_t)node->uaddr, node->size, true);
 		TAILQ_REMOVE(&vq->iotlb_list, node, next);
 		vhost_user_iotlb_pool_put(vq, node);
 	}
@@ -170,6 +171,7 @@ vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
 		if (!entry_idx) {
+			mem_set_dump((void *)(uintptr_t)node->uaddr, node->size, true);
 			TAILQ_REMOVE(&vq->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(vq, node);
 			vq->iotlb_cache_nr--;
@@ -222,12 +224,14 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq
 			vhost_user_iotlb_pool_put(vq, new_node);
 			goto unlock;
 		} else if (node->iova > new_node->iova) {
+			mem_set_dump((void *)(uintptr_t)new_node->uaddr, new_node->size, true);
 			TAILQ_INSERT_BEFORE(node, new_node, next);
 			vq->iotlb_cache_nr++;
 			goto unlock;
 		}
 	}
 
+	mem_set_dump((void *)(uintptr_t)new_node->uaddr, new_node->size, true);
 	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
 	vq->iotlb_cache_nr++;
 
@@ -255,6 +259,7 @@ vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
 			break;
 
 		if (iova < node->iova + node->size) {
+			mem_set_dump((void *)(uintptr_t)node->uaddr, node->size, true);
 			TAILQ_REMOVE(&vq->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(vq, node);
 			vq->iotlb_cache_nr--;
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index ef211ed519..1f913803f6 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -13,6 +13,7 @@
 #include <linux/virtio_net.h>
 #include <sys/socket.h>
 #include <linux/if.h>
+#include <sys/mman.h>
 
 #include <rte_log.h>
 #include <rte_ether.h>
@@ -987,4 +988,15 @@ mbuf_is_consumed(struct rte_mbuf *m)
 
 	return true;
 }
+
+static __rte_always_inline void
+mem_set_dump(__rte_unused void *ptr, __rte_unused size_t size, __rte_unused bool enable)
+{
+#ifdef MADV_DONTDUMP
+	if (madvise(ptr, size, enable ? MADV_DODUMP : MADV_DONTDUMP) == -1) {
+		rte_log(RTE_LOG_INFO, vhost_config_log_level,
+			"VHOST_CONFIG: could not set coredump preference (%s).\n", strerror(errno));
+	}
+#endif
+}
 #endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 9902ae9944..8f33d5f4d9 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -793,6 +793,9 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 			return;
 		}
 
+		mem_set_dump(vq->desc_packed, len, true);
+		mem_set_dump(vq->driver_event, len, true);
+		mem_set_dump(vq->device_event, len, true);
 		vq->access_ok = true;
 		return;
 	}
@@ -846,6 +849,9 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 			"some packets maybe resent for Tx and dropped for Rx\n");
 	}
 
+	mem_set_dump(vq->desc, len, true);
+	mem_set_dump(vq->avail, len, true);
+	mem_set_dump(vq->used, len, true);
 	vq->access_ok = true;
 
 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address desc: %p\n", vq->desc);
@@ -1224,6 +1230,7 @@ vhost_user_mmap_region(struct virtio_net *dev,
 	region->mmap_addr = mmap_addr;
 	region->mmap_size = mmap_size;
 	region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
+	mem_set_dump(mmap_addr, mmap_size, false);
 
 	if (dev->async_copy) {
 		if (add_guest_pages(dev, region, alignment) < 0) {
@@ -1528,6 +1535,7 @@ inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, int *f
 		return NULL;
 	}
 
+	mem_set_dump(ptr, size, false);
 	*fd = mfd;
 	return ptr;
 }
@@ -1736,6 +1744,7 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev,
 		dev->inflight_info->fd = -1;
 	}
 
+	mem_set_dump(addr, mmap_size, false);
 	dev->inflight_info->fd = fd;
 	dev->inflight_info->addr = addr;
 	dev->inflight_info->size = mmap_size;
@@ -2283,6 +2292,7 @@ vhost_user_set_log_base(struct virtio_net **pdev,
 	dev->log_addr = (uint64_t)(uintptr_t)addr;
 	dev->log_base = dev->log_addr + off;
 	dev->log_size = size;
+	mem_set_dump(addr, size, false);
 
 	for (i = 0; i < dev->nr_vring; i++) {
 		struct vhost_virtqueue *vq = dev->virtqueue[i];
-- 
2.31.1


