@@ -85,7 +85,9 @@ struct rte_vhost_vring {
struct vring_used *used;
uint64_t log_guest_addr;
+ /** Deprecated, use rte_vhost_vring_call() instead. */
int callfd;
+
int kickfd;
uint16_t size;
};
@@ -435,6 +437,19 @@ int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
struct rte_vhost_vring *vring);
+/**
+ * Notify the guest that used descriptors have been added to the vring. This
+ * function acts as a memory barrier.
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_vring_call(int vid, uint16_t vring_idx);
+
/**
* Get vhost RX queue avail count.
*
@@ -207,13 +207,8 @@ vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
*(volatile uint16_t *)&vr->used->idx += count;
queue->last_used_idx += count;
- /* flush used->idx update before we read avail->flags. */
- rte_mb();
+ rte_vhost_vring_call(dev->vid, queue_id);
- /* Kick the guest if necessary. */
- if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
- && (vr->callfd >= 0))
- eventfd_write(vr->callfd, (eventfd_t)1);
return count;
}
@@ -396,9 +391,7 @@ vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
vr->used->idx += i;
- if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
- && (vr->callfd >= 0))
- eventfd_write(vr->callfd, (eventfd_t)1);
+ rte_vhost_vring_call(dev->vid, queue_id);
return i;
}
@@ -110,7 +110,7 @@ descriptor_is_wr(struct vring_desc *cur_desc)
}
static void
-submit_completion(struct vhost_scsi_task *task)
+submit_completion(struct vhost_scsi_task *task, uint32_t q_idx)
{
struct rte_vhost_vring *vq;
struct vring_used *used;
@@ -131,7 +131,7 @@ submit_completion(struct vhost_scsi_task *task)
/* Send an interrupt back to the guest VM so that it knows
* a completion is ready to be processed.
*/
- eventfd_write(vq->callfd, (eventfd_t)1);
+ rte_vhost_vring_call(task->bdev->vid, q_idx);
}
static void
@@ -263,7 +263,7 @@ process_requestq(struct vhost_scsi_ctrlr *ctrlr, uint32_t q_idx)
task->resp->status = 0;
task->resp->resid = 0;
}
- submit_completion(task);
+ submit_completion(task, q_idx);
rte_free(task);
}
}
@@ -519,6 +519,33 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
return 0;
}
+int
+rte_vhost_vring_call(int vid, uint16_t vring_idx)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (!dev)
+ return -1;
+
+ if (vring_idx >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (!vq)
+ return -1;
+
+ /* flush used->idx update before we read avail->flags. */
+ rte_mb();
+
+ /* Kick the guest if necessary. */
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vq->callfd >= 0))
+ eventfd_write(vq->callfd, (eventfd_t)1);
+ return 0;
+}
+
uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
@@ -408,13 +408,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
- /* flush used->idx update before we read avail->flags. */
- rte_mb();
-
- /* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
- && (vq->callfd >= 0))
- eventfd_write(vq->callfd, (eventfd_t)1);
+ rte_vhost_vring_call(dev->vid, queue_id);
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
@@ -701,14 +695,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
if (likely(vq->shadow_used_idx)) {
flush_shadow_used_ring(dev, vq);
-
- /* flush used->idx update before we read avail->flags. */
- rte_mb();
-
- /* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
- && (vq->callfd >= 0))
- eventfd_write(vq->callfd, (eventfd_t)1);
+ rte_vhost_vring_call(dev->vid, queue_id);
}
out:
@@ -1095,7 +1082,7 @@ update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
static __rte_always_inline void
update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint32_t count)
+ uint16_t queue_id, uint32_t count)
{
if (unlikely(count == 0))
return;
@@ -1106,11 +1093,7 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
vq->used->idx += count;
vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
-
- /* Kick guest if required. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
- && (vq->callfd >= 0))
- eventfd_write(vq->callfd, (eventfd_t)1);
+ rte_vhost_vring_call(dev->vid, queue_id);
}
static __rte_always_inline struct zcopy_mbuf *
@@ -1213,7 +1196,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
}
}
- update_used_idx(dev, vq, nr_updated);
+ update_used_idx(dev, vq, queue_id, nr_updated);
}
/*
@@ -1349,7 +1332,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
if (likely(dev->dequeue_zero_copy == 0)) {
do_data_copy_dequeue(vq);
vq->last_used_idx += i;
- update_used_idx(dev, vq, i);
+ update_used_idx(dev, vq, queue_id, i);
}
out:
@@ -52,3 +52,10 @@ DPDK_17.08 {
rte_vhost_rx_queue_count;
} DPDK_17.05;
+
+EXPERIMENTAL {
+ global:
+
+ rte_vhost_vring_call;
+
+} DPDK_17.08;