@@ -29,6 +29,34 @@ Three ways:
If headers are not found, the CUDA GPU driver library is not built.
+CPU map GPU memory
+~~~~~~~~~~~~~~~~~~
+
+To enable this gpudev feature (i.e. implement the ``rte_gpu_mem_cpu_map`` function),
+you need the `GDRCopy <https://github.com/NVIDIA/gdrcopy>`_ library and driver
+installed on your system.
+
+A quick recipe to download, build and run GDRCopy library and driver:
+
+.. code-block:: console
+
+ $ git clone https://github.com/NVIDIA/gdrcopy.git
+ $ make
+ $ # make install to install GDRCopy library system wide
+ $ # Launch gdrdrv kernel module on the system
+ $ sudo ./insmod.sh
+
+You need to indicate to meson where the GDRCopy header files are, as in the case of CUDA headers.
+An example would be:
+
+.. code-block:: console
+
+ $ meson build -Dc_args="-I/usr/local/cuda/include -I/path/to/gdrcopy/include"
+
+If headers are not found, the CUDA GPU driver library is built without the CPU map capability
+and will return an error if the application invokes the gpudev ``rte_gpu_mem_cpu_map`` function.
+
+
CUDA Shared Library
-------------------
@@ -46,6 +74,30 @@ All CUDA API symbols are loaded at runtime as well.
For this reason, to build the CUDA driver library,
no need to install the CUDA library.
+CPU map GPU memory
+~~~~~~~~~~~~~~~~~~
+
+Similarly to the CUDA shared library, if the **libgdrapi.so** shared library is not
+installed in a default location (e.g. /usr/local/lib), you can use the
+``GDRCOPY_PATH_L`` environment variable.
+
+As an example, to enable the CPU map feature sanity check, run the ``app/test-gpudev``
+application with:
+
+.. code-block:: console
+
+ $ sudo CUDA_PATH_L=/path/to/libcuda GDRCOPY_PATH_L=/path/to/libgdrapi ./build/app/dpdk-test-gpudev
+
+Additionally, the ``gdrdrv`` kernel module built with the GDRCopy project has to be loaded
+on the system:
+
+.. code-block:: console
+
+ $ lsmod | egrep gdrdrv
+ gdrdrv 20480 0
+ nvidia 35307520 19 nvidia_uvm,nv_peer_mem,gdrdrv,nvidia_modeset
+
+
Design
------
@@ -11,3 +11,5 @@ Get device info =
Share CPU memory with device =
Allocate device memory =
Free memory =
+CPU map device memory =
+CPU unmap device memory =
@@ -16,6 +16,7 @@
#include <gpudev_driver.h>
#include <cuda.h>
#include <cudaTypedefs.h>
+#include "gdrcopy.h"
#define CUDA_DRIVER_MIN_VERSION 11040
#define CUDA_API_MIN_VERSION 3020
@@ -51,6 +52,7 @@ static PFN_cuFlushGPUDirectRDMAWrites pfn_cuFlushGPUDirectRDMAWrites;
static void *cudalib;
static unsigned int cuda_api_version;
static int cuda_driver_version;
+static gdr_t gdrc_h;
/* NVIDIA GPU vendor */
#define NVIDIA_GPU_VENDOR_ID (0x10de)
@@ -157,6 +159,7 @@ struct mem_entry {
CUcontext ctx;
cuda_ptr_key pkey;
enum mem_type mtype;
+ gdr_mh_t mh;
struct mem_entry *prev;
struct mem_entry *next;
};
@@ -797,6 +800,47 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)
return 0;
}
+static int
+cuda_mem_cpu_map(struct rte_gpu *dev, __rte_unused size_t size, void *ptr_in, void **ptr_out)
+{
+ struct mem_entry *mem_item;
+ cuda_ptr_key hk;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ hk = get_hash_from_ptr((void *)ptr_in);
+
+ mem_item = mem_list_find_item(hk);
+ if (mem_item == NULL) {
+ rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ if (mem_item->mtype != GPU_MEM) {
+ rte_cuda_log(ERR, "Memory address 0x%p is not GPU memory type.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ if (mem_item->size != size)
+ rte_cuda_log(WARNING,
+ "Can't expose memory area with size (%zd) different from original size (%zd).",
+ size, mem_item->size);
+
+ if (gdrcopy_pin(&gdrc_h, &(mem_item->mh), (uint64_t)mem_item->ptr_d,
+ mem_item->size, &(mem_item->ptr_h))) {
+ rte_cuda_log(ERR, "Error exposing GPU memory address 0x%p.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ *ptr_out = mem_item->ptr_h;
+
+ return 0;
+}
+
static int
cuda_mem_free(struct rte_gpu *dev, void *ptr)
{
@@ -874,6 +918,34 @@ cuda_mem_unregister(struct rte_gpu *dev, void *ptr)
return -rte_errno;
}
+static int
+cuda_mem_cpu_unmap(struct rte_gpu *dev, void *ptr_in)
+{
+ struct mem_entry *mem_item;
+ cuda_ptr_key hk;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ hk = get_hash_from_ptr((void *)ptr_in);
+
+ mem_item = mem_list_find_item(hk);
+ if (mem_item == NULL) {
+ rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ if (gdrcopy_unpin(gdrc_h, mem_item->mh, (void *)mem_item->ptr_d,
+ mem_item->size)) {
+ rte_cuda_log(ERR, "Error unexposing GPU memory address 0x%p.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static int
cuda_dev_close(struct rte_gpu *dev)
{
@@ -1040,6 +1112,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
rte_errno = ENOTSUP;
return -rte_errno;
}
+
+ gdrc_h = NULL;
}
/* Fill HW specific part of device structure */
@@ -1182,8 +1256,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
dev->ops.mem_free = cuda_mem_free;
dev->ops.mem_register = cuda_mem_register;
dev->ops.mem_unregister = cuda_mem_unregister;
- dev->ops.mem_cpu_map = NULL;
- dev->ops.mem_cpu_unmap = NULL;
+ dev->ops.mem_cpu_map = cuda_mem_cpu_map;
+ dev->ops.mem_cpu_unmap = cuda_mem_cpu_unmap;
dev->ops.wmb = cuda_wmb;
rte_gpu_complete_new(dev);
new file mode 100644
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright (c) 2022 NVIDIA Corporation & Affiliates
+*/
+#include <rte_common.h>
+#include <rte_log.h>
+#include "gdrcopy.h"
+
+static RTE_LOG_REGISTER_DEFAULT(cuda_gdr_logtype, NOTICE);
+
+/* Helper macro for logging */
+#define rte_cuda_gdrc_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, cuda_gdr_logtype, fmt "\n", ##__VA_ARGS__)
+
+static void *gdrclib;
+static gdr_t (*sym_gdr_open)(void);
+static int (*sym_gdr_close)(gdr_t g);
+static int (*sym_gdr_pin_buffer)(gdr_t g, unsigned long addr, size_t size, uint64_t p2p_token, uint32_t va_space, gdr_mh_t *handle);
+static int (*sym_gdr_unpin_buffer)(gdr_t g, gdr_mh_t handle);
+static int (*sym_gdr_map)(gdr_t g, gdr_mh_t handle, void **va, size_t size);
+static int (*sym_gdr_unmap)(gdr_t g, gdr_mh_t handle, void *va, size_t size);
+
+static int
+gdrcopy_loader(void)
+{
+ char gdrcopy_path[1024];
+
+ if (getenv("GDRCOPY_PATH_L") == NULL)
+ snprintf(gdrcopy_path, 1024, "%s", "libgdrapi.so");
+ else
+ snprintf(gdrcopy_path, 1024, "%s/%s", getenv("GDRCOPY_PATH_L"), "libgdrapi.so");
+
+ gdrclib = dlopen(gdrcopy_path, RTLD_LAZY);
+ if (gdrclib == NULL) {
+ rte_cuda_gdrc_log(ERR, "Failed to find GDRCopy library %s (GDRCOPY_PATH_L=%s)\n",
+ gdrcopy_path, getenv("GDRCOPY_PATH_L"));
+ return -1;
+ }
+
+ sym_gdr_open = dlsym(gdrclib, "gdr_open");
+ if (sym_gdr_open == NULL) {
+ rte_cuda_gdrc_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_close = dlsym(gdrclib, "gdr_close");
+ if (sym_gdr_close == NULL) {
+ rte_cuda_gdrc_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_pin_buffer = dlsym(gdrclib, "gdr_pin_buffer");
+ if (sym_gdr_pin_buffer == NULL) {
+ rte_cuda_gdrc_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_unpin_buffer = dlsym(gdrclib, "gdr_unpin_buffer");
+ if (sym_gdr_unpin_buffer == NULL) {
+ rte_cuda_gdrc_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_map = dlsym(gdrclib, "gdr_map");
+ if (sym_gdr_map == NULL) {
+ rte_cuda_gdrc_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_unmap = dlsym(gdrclib, "gdr_unmap");
+ if (sym_gdr_unmap == NULL) {
+ rte_cuda_gdrc_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+gdrcopy_open(gdr_t *g)
+{
+ gdr_t g_;
+
+ g_ = sym_gdr_open();
+ if (!g_)
+ return -1;
+ *g = g_;
+
+ return 0;
+}
+
+static int
+gdrcopy_close(gdr_t *g)
+{
+ sym_gdr_close(*g);
+ return 0;
+}
+
+int
+gdrcopy_pin(gdr_t *gdrc_h, __rte_unused gdr_mh_t *mh, uint64_t d_addr, size_t size, void **h_addr)
+{
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+ if (*gdrc_h == NULL) {
+ if (gdrcopy_loader())
+ return -ENOTSUP;
+
+ if (gdrcopy_open(gdrc_h)) {
+ rte_cuda_gdrc_log(ERR,
+ "GDRCopy gdrdrv kernel module not found. Can't CPU map GPU memory.");
+ return -EPERM;
+ }
+ }
+
+ /* Pin the device buffer */
+ if (sym_gdr_pin_buffer(*gdrc_h, d_addr, size, 0, 0, mh) != 0) {
+ rte_cuda_gdrc_log(ERR, "GDRCopy pin buffer error.");
+ return -1;
+ }
+
+ /* Map the buffer to user space */
+ if (sym_gdr_map(*gdrc_h, *mh, h_addr, size) != 0) {
+ rte_cuda_gdrc_log(ERR, "GDRCopy map buffer error.");
+ sym_gdr_unpin_buffer(*gdrc_h, *mh);
+ return -1;
+ }
+
+ return 0;
+#else
+ rte_cuda_gdrc_log(ERR,
+ "GDRCopy headers not provided at DPDK building time. Can't CPU map GPU memory.");
+ return -ENOTSUP;
+#endif
+}
+
+int
+gdrcopy_unpin(gdr_t gdrc_h, __rte_unused gdr_mh_t mh, void *d_addr, size_t size)
+{
+ if (gdrc_h == NULL)
+ return -EINVAL;
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+ /* Unmap the buffer from user space */
+ if (sym_gdr_unmap(gdrc_h, mh, d_addr, size) != 0) {
+ rte_cuda_gdrc_log(ERR, "GDRCopy unmap buffer error.");
+ return -1;
+ }
+ /* Unpin the device buffer */
+ if (sym_gdr_unpin_buffer(gdrc_h, mh) != 0) {
+ rte_cuda_gdrc_log(ERR, "GDRCopy unpin buffer error.");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef _CUDA_GDRCOPY_H_
+#define _CUDA_GDRCOPY_H_
+
+#include <dlfcn.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_errno.h>
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+ #include <gdrapi.h>
+#else
+ struct gdr;
+ typedef struct gdr *gdr_t;
+ struct gdr_mh_s;
+ typedef struct gdr_mh_s gdr_mh_t;
+#endif
+
+int gdrcopy_pin(gdr_t *gdrc_h, __rte_unused gdr_mh_t *mh, uint64_t d_addr, size_t size, void **h_addr);
+int gdrcopy_unpin(gdr_t gdrc_h, __rte_unused gdr_mh_t mh, void *d_addr, size_t size);
+
+#endif
+
@@ -17,5 +17,9 @@ if not cc.has_header('cudaTypedefs.h')
subdir_done()
endif
+if cc.has_header('gdrapi.h')
+ dpdk_conf.set('DRIVERS_GPU_CUDA_GDRCOPY_H', 1)
+endif
+
deps += ['gpudev', 'pci', 'bus_pci']
-sources = files('cuda.c')
+sources = files('cuda.c', 'gdrcopy.c')