[dpdk-dev] [PATCH v4 4/5] coding style issue fix

Huawei Xie huawei.xie at intel.com
Fri Sep 12 12:55:03 CEST 2014


This vhost lib is based on the old vhost example, and plenty of coding style
issues remain in it. The remaining issues will be fixed once this patch is applied.

Signed-off-by: Huawei Xie <huawei.xie at intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev at intel.com>
Acked-by: Tommy Long <thomas.long at intel.com>
---
 lib/librte_vhost/rte_virtio_net.h |  52 ++++----
 lib/librte_vhost/vhost-net-cdev.c | 256 +++++++++++++++++++-------------------
 lib/librte_vhost/vhost-net-cdev.h |  40 +++---
 lib/librte_vhost/vhost_rxtx.c     |  15 ++-
 lib/librte_vhost/virtio-net.c     |  88 +++++++------
 5 files changed, 220 insertions(+), 231 deletions(-)

diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 08dc6f4..82eb993 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -43,44 +43,38 @@
 #include <rte_mempool.h>
 #include <rte_mbuf.h>
 
-/* Used to indicate that the device is running on a data core */
-#define VIRTIO_DEV_RUNNING 1
-
-/* Backend value set by guest. */
-#define VIRTIO_DEV_STOPPED -1
-
+#define VIRTIO_DEV_RUNNING 1  /**< Used to indicate that the device is running on a data core. */
+#define VIRTIO_DEV_STOPPED -1 /**< Backend value set by guest. */
 
 /* Enum for virtqueue management. */
 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
 
-
-/*
- * Structure contains variables relevant to TX/RX virtqueues.
+/**
+ * Structure contains variables relevant to RX/TX virtqueues.
  */
-struct vhost_virtqueue
-{
-	struct vring_desc	*desc;				/* Virtqueue descriptor ring. */
-	struct vring_avail	*avail;				/* Virtqueue available ring. */
-	struct vring_used	*used;				/* Virtqueue used ring. */
-	uint32_t			size;				/* Size of descriptor ring. */
-	uint32_t			backend;			/* Backend value to determine if device should started/stopped. */
-	uint16_t			vhost_hlen;			/* Vhost header length (varies depending on RX merge buffers. */
-	volatile uint16_t	last_used_idx;		/* Last index used on the available ring */
-	volatile uint16_t	last_used_idx_res;	/* Used for multiple devices reserving buffers. */
-	eventfd_t			callfd;				/* Currently unused as polling mode is enabled. */
-	eventfd_t			kickfd;				/* Used to notify the guest (trigger interrupt). */
+struct vhost_virtqueue {
+	struct vring_desc    *desc;             /**< descriptor ring. */
+	struct vring_avail   *avail;            /**< available ring. */
+	struct vring_used    *used;             /**< used ring. */
+	uint32_t             size;              /**< size of descriptor ring. */
+	uint32_t             backend;           /**< backend value to determine if device should be started/stopped. */
+	uint16_t             vhost_hlen;        /**< vhost header length (varies depending on RX merge buffers). */
+	volatile uint16_t    last_used_idx;     /**< last index used on the available ring. */
+	volatile uint16_t    last_used_idx_res; /**< used for multiple devices reserving buffers. */
+	eventfd_t            callfd;            /**< currently unused as polling mode is enabled. */
+	eventfd_t            kickfd;            /**< used to notify the guest (trigger interrupt). */
 } __rte_cache_aligned;
 
-
-/*
- * Information relating to memory regions including offsets to addresses in QEMUs memory file.
+/**
+ * Information relating to memory regions including offsets to
+ * addresses in QEMU memory file.
  */
 struct virtio_memory_regions {
-	uint64_t	guest_phys_address;		/* Base guest physical address of region. */
-	uint64_t	guest_phys_address_end;	/* End guest physical address of region. */
-	uint64_t	memory_size;			/* Size of region. */
-	uint64_t	userspace_address;		/* Base userspace address of region. */
-	uint64_t	address_offset;			/* Offset of region for address translation. */
+	uint64_t    guest_phys_address;     /**< base guest physical address of region. */
+	uint64_t    guest_phys_address_end; /**< end guest physical address of region. */
+	uint64_t    memory_size;            /**< size of region. */
+	uint64_t    userspace_address;      /**< base userspace address of region. */
+	uint64_t    address_offset;         /**< offset of region for address translation. */
 };
 
 
diff --git a/lib/librte_vhost/vhost-net-cdev.c b/lib/librte_vhost/vhost-net-cdev.c
index e73bf23..c3b580a 100644
--- a/lib/librte_vhost/vhost-net-cdev.c
+++ b/lib/librte_vhost/vhost-net-cdev.c
@@ -46,16 +46,16 @@
 
 #include "vhost-net-cdev.h"
 
-#define FUSE_OPT_DUMMY 		"\0\0"
-#define FUSE_OPT_FORE 		"-f\0\0"
-#define FUSE_OPT_NOMULTI 	"-s\0\0"
+#define FUSE_OPT_DUMMY    "\0\0"
+#define FUSE_OPT_FORE     "-f\0\0"
+#define FUSE_OPT_NOMULTI  "-s\0\0"
 
 static const uint32_t	default_major = 231;
 static const uint32_t	default_minor = 1;
 static const char	cuse_device_name[]	= "/dev/cuse";
 static const char	default_cdev[] = "vhost-net";
 
-static struct fuse_session			*session;
+static struct fuse_session		*session;
 static struct vhost_net_device_ops	const *ops;
 
 /*
@@ -113,61 +113,61 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
  * Boilerplate code for CUSE IOCTL
  * Implicit arguments: ctx, req, result.
  */
-#define VHOST_IOCTL(func) do {								\
-	result = (func)(ctx);									\
-	fuse_reply_ioctl(req, result, NULL, 0);					\
-} while(0)													
+#define VHOST_IOCTL(func) do { \
+	result = (func)(ctx); \
+	fuse_reply_ioctl(req, result, NULL, 0); \
+} while (0)
 
 /*
  * Boilerplate IOCTL RETRY
  * Implicit arguments: req.
  */
-#define VHOST_IOCTL_RETRY(size_r, size_w) do {									\
-	struct iovec iov_r = { arg, (size_r) };										\
-	struct iovec iov_w = { arg, (size_w) };										\
-	fuse_reply_ioctl_retry(req, &iov_r, (size_r)?1:0, &iov_w, (size_w)?1:0);	\
-} while(0)																		\
+#define VHOST_IOCTL_RETRY(size_r, size_w) do { \
+	struct iovec iov_r = { arg, (size_r) }; \
+	struct iovec iov_w = { arg, (size_w) }; \
+	fuse_reply_ioctl_retry(req, &iov_r, (size_r) ? 1 : 0, &iov_w, (size_w) ? 1 : 0); \
+} while (0)
 
 /*
  * Boilerplate code for CUSE Read IOCTL
  * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
  */
-#define VHOST_IOCTL_R(type, var, func) do {				\
-	if (!in_bufsz) {									\
-		VHOST_IOCTL_RETRY(sizeof(type), 0);				\
-	} else {											\
-		(var) = *(const type * ) in_buf;				\
-		result = func(ctx, &(var));						\
-		fuse_reply_ioctl(req, result, NULL, 0);			\
-	}													\
-} while(0)												\
+#define VHOST_IOCTL_R(type, var, func) do {             \
+	if (!in_bufsz) {                                \
+		VHOST_IOCTL_RETRY(sizeof(type), 0);     \
+	} else {                                        \
+		(var) = *(const type *)in_buf;          \
+		result = func(ctx, &(var));             \
+		fuse_reply_ioctl(req, result, NULL, 0); \
+	}                                               \
+} while (0)
 
 /*
- *	Boilerplate code for CUSE Write IOCTL
+ * Boilerplate code for CUSE Write IOCTL
  * Implicit arguments: ctx, req, result, out_bufsz.
  */
-#define	VHOST_IOCTL_W(type, var, func) do {						\
-	if (!out_bufsz) {											\
-		VHOST_IOCTL_RETRY(0, sizeof(type));						\
-	} else {													\
-		result = (func)(ctx, &(var));							\
-		fuse_reply_ioctl(req, result, &(var), sizeof(type));	\
-	}															\
-} while(0)														\
+#define	VHOST_IOCTL_W(type, var, func) do {              \
+	if (!out_bufsz) {                                \
+		VHOST_IOCTL_RETRY(0, sizeof(type));      \
+	} else {                                         \
+		result = (func)(ctx, &(var));            \
+		fuse_reply_ioctl(req, result, &(var), sizeof(type)); \
+	} \
+} while (0)
 
 /*
  * Boilerplate code for CUSE Read/Write IOCTL
  * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
  */
-#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {			\
-	if (!in_bufsz) {												\
-		VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));			\
-	} else {														\
-		(var1) = *(const type1* ) (in_buf);							\
-		result = (func)(ctx, (var1), &(var2));						\
-		fuse_reply_ioctl(req, result, &(var2), sizeof(type2));		\
-	}																\
-} while(0)															\
+#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {      \
+	if (!in_bufsz) {                                         \
+		VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2)); \
+	} else {                                                 \
+		(var1) = *(const type1 *)(in_buf);              \
+		result = (func)(ctx, (var1), &(var2));           \
+		fuse_reply_ioctl(req, result, &(var2), sizeof(type2));  \
+	} \
+} while (0)
 
 /*
  * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on
@@ -187,106 +187,104 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 	uint32_t index;
 	int result = 0;
 
-	switch(cmd)
-	{
-		case VHOST_NET_SET_BACKEND:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
-			VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
+	switch (cmd) {
+	case VHOST_NET_SET_BACKEND:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
+		break;
+
+	case VHOST_GET_FEATURES:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
+		VHOST_IOCTL_W(uint64_t, features, ops->get_features);
+		break;
+
+	case VHOST_SET_FEATURES:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
+		VHOST_IOCTL_R(uint64_t, features, ops->set_features);
+		break;
+
+	case VHOST_RESET_OWNER:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
+		VHOST_IOCTL(ops->reset_owner);
+		break;
+
+	case VHOST_SET_OWNER:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
+		VHOST_IOCTL(ops->set_owner);
+		break;
+
+	case VHOST_SET_MEM_TABLE:
+		/*TODO fix race condition.*/
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
+		static struct vhost_memory mem_temp;
+
+		switch (in_bufsz) {
+		case 0:
+			VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
 			break;
 
-		case VHOST_GET_FEATURES:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
-			VHOST_IOCTL_W(uint64_t, features, ops->get_features);
-			break;
-
-		case VHOST_SET_FEATURES:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
-			VHOST_IOCTL_R(uint64_t, features, ops->set_features);
-			break;
-
-		case VHOST_RESET_OWNER:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
-			VHOST_IOCTL(ops->reset_owner);
-			break;
-
-		case VHOST_SET_OWNER:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
-			VHOST_IOCTL(ops->set_owner);
-			break;
-
-		case VHOST_SET_MEM_TABLE:
-			/*TODO fix race condition.*/
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
-			static struct vhost_memory mem_temp;
-
-			switch(in_bufsz){
-				case 0:
-					VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
-					break;
-
-				case sizeof(struct vhost_memory):
-					mem_temp = *(const struct vhost_memory *) in_buf;
-
-					if (mem_temp.nregions > 0) {
-						VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
-					} else {
-						result = -1;
-						fuse_reply_ioctl(req, result, NULL, 0);
-					}
-					break;
-
-				default:
-					result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
-					if (result)
-						fuse_reply_err(req, EINVAL);
-					else
-						fuse_reply_ioctl(req, result, NULL, 0);
+		case sizeof(struct vhost_memory):
+			mem_temp = *(const struct vhost_memory *) in_buf;
 
+			if (mem_temp.nregions > 0) {
+				VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
+			} else {
+				result = -1;
+				fuse_reply_ioctl(req, result, NULL, 0);
 			}
-
-			break;
-
-		case VHOST_SET_VRING_NUM:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
-			VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
-			break;
-
-		case VHOST_SET_VRING_BASE:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
-			VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
-			break;
-
-		case VHOST_GET_VRING_BASE:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
-			VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
-			break;
-
-		case VHOST_SET_VRING_ADDR:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
-			VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
-			break;
-
-		case VHOST_SET_VRING_KICK:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
-			VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
-			break;
-
-		case VHOST_SET_VRING_CALL:
-			LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
-			VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
 			break;
 
 		default:
-			RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
-			result = -1;
-			fuse_reply_ioctl(req, result, NULL, 0);
+			result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
+			if (result)
+				fuse_reply_err(req, EINVAL);
+			else
+				fuse_reply_ioctl(req, result, NULL, 0);
+
+		}
+
+		break;
+
+	case VHOST_SET_VRING_NUM:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
+		break;
+
+	case VHOST_SET_VRING_BASE:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
+		break;
+
+	case VHOST_GET_VRING_BASE:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
+		VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
+		break;
+
+	case VHOST_SET_VRING_ADDR:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
+		break;
+
+	case VHOST_SET_VRING_KICK:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
+		break;
+
+	case VHOST_SET_VRING_CALL:
+		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
+		break;
+
+	default:
+		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOES NOT EXIST\n", ctx.fh);
+		result = -1;
+		fuse_reply_ioctl(req, result, NULL, 0);
 	}
 
-	if (result < 0) {
+	if (result < 0)
 		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
-	} else {
+	else
 		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
-	}
 }
 
 /*
diff --git a/lib/librte_vhost/vhost-net-cdev.h b/lib/librte_vhost/vhost-net-cdev.h
index d9a5a9a..d1b2386 100644
--- a/lib/librte_vhost/vhost-net-cdev.h
+++ b/lib/librte_vhost/vhost-net-cdev.h
@@ -72,41 +72,39 @@
 #define VHOST_PRINT_PACKET(device, addr, size, header) do {} while (0)
 #endif
 
-
-/*
+/**
  * Structure used to identify device context.
  */
-struct vhost_device_ctx
-{
-	pid_t		pid;	/* PID of process calling the IOCTL. */
-	uint64_t 	fh;		/* Populated with fi->fh to track the device index. */
+struct vhost_device_ctx {
+	pid_t    pid; /**< PID of process calling the IOCTL. */
+	uint64_t fh;  /**< Populated with fi->fh to track the device index. */
 };
 
-/*
+/**
  * Structure contains function pointers to be defined in virtio-net.c. These
  * functions are called in CUSE context and are used to configure devices.
  */
 struct vhost_net_device_ops {
-	int (* new_device) 		(struct vhost_device_ctx);
-	void (* destroy_device) (struct vhost_device_ctx);
+	int (*new_device)(struct vhost_device_ctx);
+	void (*destroy_device)(struct vhost_device_ctx);
 
-	int (* get_features) 	(struct vhost_device_ctx, uint64_t *);
-	int (* set_features) 	(struct vhost_device_ctx, uint64_t *);
+	int (*get_features)(struct vhost_device_ctx, uint64_t *);
+	int (*set_features)(struct vhost_device_ctx, uint64_t *);
 
-	int (* set_mem_table) 	(struct vhost_device_ctx, const void *, uint32_t);
+	int (*set_mem_table)(struct vhost_device_ctx, const void *, uint32_t);
 
-	int (* set_vring_num) 	(struct vhost_device_ctx, struct vhost_vring_state *);
-	int (* set_vring_addr) 	(struct vhost_device_ctx, struct vhost_vring_addr *);
-	int (* set_vring_base) 	(struct vhost_device_ctx, struct vhost_vring_state *);
-	int (* get_vring_base) 	(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
+	int (*set_vring_num)(struct vhost_device_ctx, struct vhost_vring_state *);
+	int (*set_vring_addr)(struct vhost_device_ctx, struct vhost_vring_addr *);
+	int (*set_vring_base)(struct vhost_device_ctx, struct vhost_vring_state *);
+	int (*get_vring_base)(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
 
-	int (* set_vring_kick) 	(struct vhost_device_ctx, struct vhost_vring_file *);
-	int (* set_vring_call) 	(struct vhost_device_ctx, struct vhost_vring_file *);
+	int (*set_vring_kick)(struct vhost_device_ctx, struct vhost_vring_file *);
+	int (*set_vring_call)(struct vhost_device_ctx, struct vhost_vring_file *);
 
-	int (* set_backend) 	(struct vhost_device_ctx, struct vhost_vring_file *);
+	int (*set_backend)(struct vhost_device_ctx, struct vhost_vring_file *);
 
-	int (* set_owner) 		(struct vhost_device_ctx);
-	int (* reset_owner) 	(struct vhost_device_ctx);
+	int (*set_owner)(struct vhost_device_ctx);
+	int (*reset_owner)(struct vhost_device_ctx);
 };
 
 
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 0d96c43..65cfac5 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -56,7 +56,7 @@ rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mb
 	struct vring_desc *desc;
 	struct rte_mbuf *buff;
 	/* The virtio_hdr is initialised to 0. */
-	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
+	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
 	uint64_t buff_addr = 0;
 	uint64_t buff_hdr_addr = 0;
 	uint32_t head[VHOST_MAX_PKT_BURST], packet_len = 0;
@@ -67,7 +67,7 @@ rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mb
 	uint16_t free_entries;
 	uint8_t success = 0;
 
-	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") %s()\n", dev->device_fh, __func__);
 	if (unlikely(queue_id != VIRTIO_RXQ)) {
 		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
 		return 0;
@@ -96,7 +96,9 @@ rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mb
 		/* If there is contention here and failed, try again. */
 	} while (unlikely(success == 0));
 	res_cur_idx = res_base_idx;
-	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+			dev->device_fh,
+			res_cur_idx, res_end_idx);
 
 	/* Prefetch available ring to retrieve indexes. */
 	rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
@@ -120,7 +122,7 @@ rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mb
 		/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
 		buff_addr = gpa_to_vva(dev, desc->addr);
 		/* Prefetch buffer address. */
-		rte_prefetch0((void*)(uintptr_t)buff_addr);
+		rte_prefetch0((void *)(uintptr_t)buff_addr);
 
 		if (mergeable && (mrg_count != 0)) {
 			desc->len = packet_len = rte_pktmbuf_data_len(buff);
@@ -155,12 +157,13 @@ rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mb
 		rte_memcpy((void *)(uintptr_t)buff_addr,
 			(const void *)buff->pkt.data,
 			rte_pktmbuf_data_len(buff));
+
 		VHOST_PRINT_PACKET(dev, (uintptr_t)buff_addr,
 			rte_pktmbuf_data_len(buff), 0);
 
 		res_cur_idx++;
 		packet_success++;
-	
+
 		/* If mergeable is disabled then a header is required per buffer. */
 		if (!mergeable) {
 			rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen);
@@ -255,7 +258,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
 		/* Buffer address translation. */
 		buff_addr = gpa_to_vva(dev, desc->addr);
 		/* Prefetch buffer address. */
-		rte_prefetch0((void*)(uintptr_t)buff_addr);
+		rte_prefetch0((void *)(uintptr_t)buff_addr);
 
 		used_idx = vq->last_used_idx & (vq->size - 1);
 
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 9852961..e6d331a 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -63,9 +63,9 @@ struct virtio_net_config_ll {
 static const char eventfd_cdev[] = "/dev/eventfd-link";
 
 /* device ops to add/remove device to data core. */
-static struct virtio_net_device_ops const * notify_ops;
+static struct virtio_net_device_ops const *notify_ops;
 /* Root address of the linked list in the configuration core. */
-static struct virtio_net_config_ll			*ll_root = NULL;
+static struct virtio_net_config_ll *ll_root;
 
 /* Features supported by this library. */
 #define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)
@@ -81,16 +81,15 @@ static const uint32_t BUFSIZE = PATH_MAX;
 #define PROCMAP_SZ 8
 
 /* Structure containing information gathered from maps file. */
-struct procmap
-{
-	uint64_t	va_start;			/* Start virtual address in file. */
-	uint64_t	len;				/* Size of file. */
-	uint64_t	pgoff;				/* Not used. */
-	uint32_t	maj;				/* Not used. */
-	uint32_t	min;				/* Not used. */
-	uint32_t	ino;				/* Not used. */
-	char		prot[PROT_SZ];		/* Not used. */
-	char		fname[PATH_MAX];	/* File name. */
+struct procmap {
+	uint64_t    va_start;         /* Start virtual address in file. */
+	uint64_t    len;              /* Size of file. */
+	uint64_t    pgoff;            /* Not used. */
+	uint32_t    maj;              /* Not used. */
+	uint32_t    min;              /* Not used. */
+	uint32_t    ino;              /* Not used. */
+	char        prot[PROT_SZ];    /* Not used. */
+	char        fname[PATH_MAX];  /* File name. */
 };
 
 /*
@@ -108,8 +107,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
 	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
 		region = &dev->mem->regions[regionidx];
 		if ((qemu_va >= region->userspace_address) &&
-				(qemu_va <= region->userspace_address +
-			 	region->memory_size)) {
+			(qemu_va <= region->userspace_address +
+			region->memory_size)) {
 			vhost_va = dev->mem->mapped_address + qemu_va - dev->mem->base_address;
 			break;
 		}
@@ -121,7 +120,7 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
  * Locate the file containing QEMU's memory space and map it to our address space.
  */
 static int
-host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, uint64_t addr)
+host_memory_map(struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, uint64_t addr)
 {
 	struct dirent *dptr = NULL;
 	struct procmap procmap;
@@ -132,18 +131,18 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 	char mapfile[PATH_MAX];
 	char procdir[PATH_MAX];
 	char resolved_path[PATH_MAX];
-	FILE		*fmap;
-	void		*map;
-	uint8_t 	found = 0;
-	char 		line[BUFSIZE];
+	FILE *fmap;
+	void *map;
+	uint8_t found = 0;
+	char line[BUFSIZE];
 	char dlm[] = "-   :   ";
 	char *str, *sp, *in[PROCMAP_SZ];
 	char *end = NULL;
 
 	/* Path where mem files are located. */
-	snprintf (procdir, PATH_MAX, "/proc/%u/fd/", pid);
+	snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
 	/* Maps file used to locate mem file. */
-	snprintf (mapfile, PATH_MAX, "/proc/%u/maps", pid);
+	snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);
 
 	fmap = fopen(mapfile, "r");
 	if (fmap == NULL) {
@@ -157,7 +156,8 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 		errno = 0;
 		/* Split line in to fields. */
 		for (i = 0; i < PROCMAP_SZ; i++) {
-			if (((in[i] = strtok_r(str, &dlm[i], &sp)) == NULL) || (errno != 0)) {
+			in[i] = strtok_r(str, &dlm[i], &sp);
+			if ((in[i] == NULL) || (errno != 0)) {
 				fclose(fmap);
 				return -1;
 			}
@@ -220,7 +220,7 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 	/* Find the guest memory file among the process fds. */
 	dp = opendir(procdir);
 	if (dp == NULL) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory \n", dev->device_fh, pid);
+		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
 		return -1;
 
 	}
@@ -229,8 +229,8 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 
 	/* Read the fd directory contents. */
 	while (NULL != (dptr = readdir(dp))) {
-		snprintf (memfile, PATH_MAX, "/proc/%u/fd/%s", pid, dptr->d_name);
-	    realpath(memfile, resolved_path);
+		snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s", pid, dptr->d_name);
+		realpath(memfile, resolved_path);
 		if (resolved_path == NULL) {
 			RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to resolve fd directory\n", dev->device_fh);
 			closedir(dp);
@@ -258,7 +258,7 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 	}
 
 	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE , MAP_POPULATE|MAP_SHARED, fd, 0);
-	close (fd);
+	close(fd);
 
 	if (map == MAP_FAILED) {
 		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n",  dev->device_fh, memfile, pid);
@@ -304,9 +304,8 @@ get_device(struct vhost_device_ctx ctx)
 	ll_dev = get_config_ll_entry(ctx);
 
 	/* If a matching entry is found in the linked list, return the device in that entry. */
-	if (ll_dev) {
+	if (ll_dev)
 		return &ll_dev->dev;
-	}
 
 	RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
 	return NULL;
@@ -351,7 +350,7 @@ cleanup_device(struct virtio_net *dev)
 {
 	/* Unmap QEMU memory file if mapped. */
 	if (dev->mem) {
-		munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
+		munmap((void *)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
 		free(dev->mem);
 	}
 
@@ -416,7 +415,7 @@ init_device(struct virtio_net *dev)
 	vq_offset = offsetof(struct virtio_net, mem);
 
 	/* Set everything to 0. */
-	memset((void*)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
+	memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
 		(sizeof(struct virtio_net) - (size_t)vq_offset));
 	memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
 	memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));
@@ -495,6 +494,7 @@ destroy_device(struct vhost_device_ctx ctx)
 			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
 				notify_ops->destroy_device(&(ll_dev_cur->dev));
 			ll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);
+			/*TODO return here? */
 		} else {
 			ll_dev_last = ll_dev_cur;
 			ll_dev_cur = ll_dev_cur->next;
@@ -603,7 +603,7 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 		return -1;
 
 	if (dev->mem) {
-		munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
+		munmap((void *)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
 		free(dev->mem);
 	}
 
@@ -616,7 +616,7 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 
 	mem->nregions = nregions;
 
-	mem_regions = (void*)(uintptr_t)((uint64_t)(uintptr_t)mem_regions_addr + size);
+	mem_regions = (void *)(uintptr_t)((uint64_t)(uintptr_t)mem_regions_addr + size);
 
 	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
 		/* Populate the region structure for each region. */
@@ -627,8 +627,8 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 		mem->regions[regionidx].userspace_address = mem_regions[regionidx].userspace_addr;
 
 		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
-				regionidx, (void*)(uintptr_t)mem->regions[regionidx].guest_phys_address,
-				(void*)(uintptr_t)mem->regions[regionidx].userspace_address,
+				regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
+				(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
 				mem->regions[regionidx].memory_size);
 
 		/*set the base address mapping*/
@@ -682,11 +682,10 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 	 * Calculate the address offset for each region. This offset is used to identify the vhost virtual address
 	 * corresponding to a QEMU guest physical address.
 	 */
-	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
+	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++)
 		dev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address
 			+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;
 
-	}
 	return 0;
 }
 
@@ -728,19 +727,19 @@ set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
 	vq = dev->virtqueue[addr->index];
 
 	/* The addresses are converted from QEMU virtual to Vhost virtual. */
-	vq->desc = (struct vring_desc*)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
+	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
 	if (vq->desc == 0) {
 		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
 		return -1;
 	}
 
-	vq->avail = (struct vring_avail*)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
+	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
 	if (vq->avail == 0) {
 		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
 		return -1;
 	}
 
-	vq->used = (struct vring_used*)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
+	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
 	if (vq->used == 0) {
 		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
 		return -1;
@@ -885,7 +884,7 @@ set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 	eventfd_call.target_pid = ctx.pid;
 
 	if (eventfd_copy(dev, &eventfd_call))
-        return -1;
+		return -1;
 
 	return 0;
 }
@@ -903,9 +902,8 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 	struct virtio_net *dev;
 
 	dev = get_device(ctx);
-	if (dev == NULL) {
+	if (dev == NULL)
 		return -1;
-	}
 
 	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
 	dev->virtqueue[file->index]->backend = file->fd;
@@ -917,9 +915,8 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 			return notify_ops->new_device(dev);
 	/* Otherwise we remove it. */
 	} else
-		if (file->fd == VIRTIO_DEV_STOPPED) {
+		if (file->fd == VIRTIO_DEV_STOPPED)
 			notify_ops->destroy_device(dev);
-		}
 	return 0;
 }
 
@@ -927,8 +924,7 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
  * Function pointers are set for the device operations to allow CUSE to call functions
  * when an IOCTL, device_add or device_release is received.
  */
-static const struct vhost_net_device_ops vhost_device_ops =
-{
+static const struct vhost_net_device_ops vhost_device_ops = {
 	.new_device = new_device,
 	.destroy_device = destroy_device,
 
-- 
1.8.1.4


