[dpdk-dev] [PATCH] gpudev: introduce memory API

David Marchand david.marchand at redhat.com
Thu Jun 3 09:18:33 CEST 2021


Quick pass:

On Wed, Jun 2, 2021 at 10:36 PM Thomas Monjalon <thomas at monjalon.net> wrote:
> diff --git a/lib/gpudev/gpu_driver.h b/lib/gpudev/gpu_driver.h
> new file mode 100644
> index 0000000000..5ff609e49d
> --- /dev/null
> +++ b/lib/gpudev/gpu_driver.h
> @@ -0,0 +1,44 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 NVIDIA Corporation & Affiliates
> + */
> +
> +#ifndef GPU_DRIVER_H
> +#define GPU_DRIVER_H
> +
> +#include <stdint.h>
> +
> +#include <rte_common.h>
> +
> +#include "rte_gpudev.h"
> +
> +struct rte_gpu_dev;
> +
> +typedef int (*gpu_malloc_t)(struct rte_gpu_dev *dev, size_t size, void **ptr);
> +typedef int (*gpu_free_t)(struct rte_gpu_dev *dev, void *ptr);
> +

Great to see this structure hidden in a driver-only header.


> +struct rte_gpu_dev {

We could have a name[] field here that rte_gpu_info could later point at.
Who is responsible for deciding the device name?
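Rough sketch of what I mean (reusing RTE_GPU_NAME_MAX_LEN from
rte_gpudev.h, field placement up for discussion):

struct rte_gpu_dev {
	/* Unique identifier name, set once by rte_gpu_dev_allocate(). */
	char name[RTE_GPU_NAME_MAX_LEN];
	/* Backing device. */
	struct rte_device *device;
	/* ... rest of the fields unchanged ... */
};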


> +       /* Backing device. */
> +       struct rte_device *device;
> +       /* GPU info structure. */
> +       struct rte_gpu_info info;
> +       /* Counter of processes using the device. */
> +       uint16_t process_cnt;
> +       /* If device is currently used or not. */
> +       enum rte_gpu_state state;
> +       /* FUNCTION: Allocate memory on the GPU. */
> +       gpu_malloc_t gpu_malloc;
> +       /* FUNCTION: Allocate memory on the CPU visible from the GPU. */
> +       gpu_malloc_t gpu_malloc_visible;
> +       /* FUNCTION: Free allocated memory on the GPU. */
> +       gpu_free_t gpu_free;
> +       /* Device interrupt handle. */
> +       struct rte_intr_handle *intr_handle;
> +       /* Driver-specific private data. */
> +       void *dev_private;
> +} __rte_cache_aligned;
> +
> +struct rte_gpu_dev *rte_gpu_dev_allocate(const char *name);
> +struct rte_gpu_dev *rte_gpu_dev_get_by_name(const char *name);

Those symbols will have to be marked internal (__rte_internal +
version.map) for drivers to see them.
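For instance (assuming a lib/gpudev/version.map is added):

__rte_internal
struct rte_gpu_dev *rte_gpu_dev_allocate(const char *name);

plus an INTERNAL section in the map file:

INTERNAL {
	global:

	rte_gpu_dev_allocate;
	rte_gpu_dev_get_by_name;
	rte_gpu_dev_release;
};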


> +int rte_gpu_dev_release(struct rte_gpu_dev *gpudev);
> +
> +#endif /* GPU_DRIVER_H */


> diff --git a/lib/gpudev/rte_gpudev.h b/lib/gpudev/rte_gpudev.h
> new file mode 100644
> index 0000000000..b12f35c17e
> --- /dev/null
> +++ b/lib/gpudev/rte_gpudev.h
> @@ -0,0 +1,183 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 NVIDIA Corporation & Affiliates
> + */
> +
> +#ifndef RTE_GPUDEV_H
> +#define RTE_GPUDEV_H
> +
> +#include <stdint.h>
> +#include <stdbool.h>
> +
> +#include <rte_common.h>
> +
> +/**
> + * @file
> + * Generic library to interact with a GPU.
> + *
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/** Maximum number of GPU engines. */
> +#define RTE_GPU_MAX_DEVS UINT16_C(32)

Bleh.
Let's stop with max values.
The iterator _next should return a special value indicating there are
no more devs to list.
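Ethdev-style iteration would look like this (names and sentinel value
are just a suggestion):

/* Sentinel returned when there is no more device. */
#define RTE_GPU_ID_NONE UINT16_MAX

/* Return the id of the next initialized device, from gpu_id included,
 * or RTE_GPU_ID_NONE when the list is exhausted.
 */
uint16_t rte_gpu_find_next(uint16_t gpu_id);

#define RTE_GPU_FOREACH(g) \
	for ((g) = rte_gpu_find_next(0); \
	     (g) != RTE_GPU_ID_NONE; \
	     (g) = rte_gpu_find_next((g) + 1))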



> +/** Maximum length of device name. */
> +#define RTE_GPU_NAME_MAX_LEN 128
> +
> +/** Flags indicate current state of GPU device. */
> +enum rte_gpu_state {
> +       RTE_GPU_STATE_UNUSED,        /**< not initialized */
> +       RTE_GPU_STATE_INITIALIZED,   /**< initialized */
> +};
> +
> +/** Store a list of info for a given GPU. */
> +struct rte_gpu_info {
> +       /** GPU device ID. */
> +       uint16_t gpu_id;
> +       /** Unique identifier name. */
> +       char name[RTE_GPU_NAME_MAX_LEN];

const char *name;

Then the gpu generic layer simply fills this with the
rte_gpu_dev->name field I proposed above.
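i.e. something like this (gpu_dev_get() being a hypothetical lookup
helper in the generic layer):

int rte_gpu_info_get(uint16_t gpu_id, struct rte_gpu_info *info)
{
	struct rte_gpu_dev *dev = gpu_dev_get(gpu_id);

	if (dev == NULL || info == NULL)
		return -EINVAL;
	*info = dev->info;
	/* No copy: point at the name stored in rte_gpu_dev. */
	info->name = dev->name;
	return 0;
}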


> +       /** Total memory available on device. */
> +       size_t total_memory;
> +       /** Total processors available on device. */
> +       int processor_count;
> +};


-- 
David Marchand


