[dpdk-dev,RFC] net/failsafe: add Rx interrupts

Message ID 1511946476-47368-1-git-send-email-motih@mellanox.com (mailing list archive)
State Superseded, archived
Checks

Context               Check    Description
ci/checkpatch         warning  coding style issues
ci/Intel-compilation  fail     Compilation issues

Commit Message

Moti Haimovsky Nov. 29, 2017, 9:07 a.m. UTC
  This RFC suggests support for registering and waiting for Rx
interrupts in the failsafe PMD. This allows applications to wait
for Rx events from the PMD using the DPDK rte_epoll subsystem.
The failsafe PMD presents the application with the facade of a single
device while internally it manages several devices on its behalf,
including packet transmission and reception.
The proposed failsafe Rx interrupt scheme follows the same approach:
the failsafe PMD presents the application with a single set of Rx
interrupt vectors representing its own Rx queues, while internally it
serves as an interrupt proxy for its subdevices. This allows applications
to wait for Rx traffic from the failsafe PMD by registering and waiting
for Rx events on its Rx queues.
To support this, the following is suggested (an application-side usage
sketch follows this list):
  * Every Rx queue in the failsafe (virtual) device will be assigned a Linux
    event file descriptor (efd) and an enable_interrupts flag.
  * The failsafe PMD will fill its rte_intr_handle structure with the Rx
    efds assigned above and register them with the EAL.
  * The failsafe driver will create a private epoll fd (epfd) and allocate
    enough space to handle all the Rx events from all its subdevices.
  * Acting as an application, for each Rx queue in each active subdevice
    the failsafe PMD will:
      o Register the Rx queue with the EAL.
      o Pass the EAL the failsafe private epoll fd as the epfd on which to
        register the Rx queue event.
      o Pass the EAL, as a parameter, the pointer to the failsafe Rx queue
        that handles this subdevice Rx queue.
      o Using the DPDK service callbacks, launch an Rx proxy service that
        waits on the epoll fd for Rx events from the subdevices.
      o For each Rx event received, the proxy service will:
         - Retrieve the pointer to the failsafe Rx queue that handles this
           subdevice Rx queue from the user data returned by the EAL.
         - Trigger a failsafe Rx event on that queue by writing to its
           event fd, unless interrupts are disabled for that queue.
  * The failsafe PMD will also implement the rx_queue_intr_enable and
    rx_queue_intr_disable routines, which enable and disable Rx interrupts
    on both the failsafe device and its subdevices.
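
For reference, below is a minimal application-side sketch of how the
failsafe Rx interrupts could be consumed once this RFC is in place. It
follows the standard DPDK Rx interrupt flow (register the queue events,
arm the interrupts, sleep in rte_epoll_wait(), then switch back to
polling); the port id, queue counts and burst sizes are placeholders and
are not part of this patch:

    #include <stdint.h>

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>
    #include <rte_mbuf.h>

    #define MAX_EVENTS 16
    #define BURST_SIZE 32

    /* Register every Rx queue of the failsafe port with the default
     * per-thread epoll fd; the queue id is passed back as event data.
     */
    static int
    register_rx_interrupts(uint16_t port_id, uint16_t nb_rxq)
    {
        uint16_t q;
        int ret;

        for (q = 0; q < nb_rxq; q++) {
            ret = rte_eth_dev_rx_intr_ctl_q(port_id, q,
                                            RTE_EPOLL_PER_THREAD,
                                            RTE_INTR_EVENT_ADD,
                                            (void *)(uintptr_t)q);
            if (ret < 0)
                return ret;
        }
        return 0;
    }

    /* Arm the Rx interrupts, sleep until the failsafe PMD signals an
     * Rx event, then drain the signaled queues in polling mode.
     */
    static void
    wait_and_poll(uint16_t port_id, uint16_t nb_rxq)
    {
        struct rte_epoll_event events[MAX_EVENTS];
        struct rte_mbuf *bufs[BURST_SIZE];
        uint16_t q, nb, j;
        int i, n;

        for (q = 0; q < nb_rxq; q++)
            rte_eth_dev_rx_intr_enable(port_id, q);
        n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, MAX_EVENTS, -1);
        for (i = 0; i < n; i++) {
            q = (uint16_t)(uintptr_t)events[i].epdata.data;
            rte_eth_dev_rx_intr_disable(port_id, q);
            do {
                nb = rte_eth_rx_burst(port_id, q, bufs, BURST_SIZE);
                for (j = 0; j < nb; j++)
                    rte_pktmbuf_free(bufs[j]); /* real work goes here */
            } while (nb > 0);
        }
    }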

Signed-off-by: Moti Haimovsky <motih@mellanox.com>
---
 drivers/net/failsafe/Makefile           |   1 +
 drivers/net/failsafe/failsafe.c         |   4 +
 drivers/net/failsafe/failsafe_ether.c   |   1 +
 drivers/net/failsafe/failsafe_intr.c    | 524 ++++++++++++++++++++++++++++++++
 drivers/net/failsafe/failsafe_ops.c     |  15 +
 drivers/net/failsafe/failsafe_private.h |  42 +++
 6 files changed, 587 insertions(+)
 create mode 100644 drivers/net/failsafe/failsafe_intr.c
  

Patch

diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile
index ea2a8fe..91a734b 100644
--- a/drivers/net/failsafe/Makefile
+++ b/drivers/net/failsafe/Makefile
@@ -46,6 +46,7 @@  SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ops.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ether.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_intr.c
 
 # No exported include files
 
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index 6bc5aba..3b5e059 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -239,6 +239,10 @@ 
 		mac->addr_bytes[2], mac->addr_bytes[3],
 		mac->addr_bytes[4], mac->addr_bytes[5]);
 	dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+	PRIV(dev)->intr_handle = (struct rte_intr_handle){
+		.fd = -1,
+		.type = RTE_INTR_HANDLE_EXT,
+	};
 	return 0;
 free_args:
 	failsafe_args_free(dev);
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 21392e5..80741ba 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -283,6 +283,7 @@ 
 		return;
 	switch (sdev->state) {
 	case DEV_STARTED:
+		failsafe_rx_intr_uninstall_subdevice(sdev);
 		rte_eth_dev_stop(PORT_ID(sdev));
 		sdev->state = DEV_ACTIVE;
 		/* fallthrough */
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
new file mode 100644
index 0000000..3ea57c4
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -0,0 +1,524 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2017 Mellanox
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of the copyright holder nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Interrupts handling for failsafe driver.
+ */
+
+#include <sys/eventfd.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_interrupts.h>
+#include <rte_io.h>
+#include <rte_service_component.h>
+
+#include "failsafe_private.h"
+
+#define NUM_RX_PROXIES (FAILSAFE_MAX_ETHPORTS * RTE_MAX_RXTX_INTR_VEC_ID)
+
+/**
+ * Fail-safe Rx event proxy service routine.
+ * This routine waits on the proxy epoll fd for Rx events coming from the
+ * subdevices and triggers the matching failsafe Rx events.
+ *
+ * @param data
+ *   Pointer to the failsafe private structure.
+ * @return
+ *   0 on success, negative errno value otherwise.
+ */
+static int
+fs_rx_event_proxy_service(void *data)
+{
+	struct fs_priv *priv = data;
+	struct rxq *rxq;
+	struct rte_epoll_event *events = priv->rxp.evec;
+	uint64_t u64 = 1;
+	int i, n, rc = 0;
+
+	n = rte_epoll_wait(priv->rxp.efd, events, NUM_RX_PROXIES, -1);
+	for (i = 0; i < n; i++) {
+		rxq = (struct rxq *)events[i].epdata.data;
+		if (rxq->enable_events && rxq->event_fd != -1) {
+			if (write(rxq->event_fd, &u64, sizeof(u64)) !=
+			    sizeof(u64)) {
+				ERROR("failed to proxy Rx event to event fd %d",
+				       rxq->event_fd);
+				rc = -EIO;
+			} else {
+				INFO("Proxied Rx event to %d", rxq->event_fd);
+			}
+		}
+	}
+	return rc;
+}
+
+/**
+ * Install the failsafe Rx event proxy service.
+ *
+ * @param priv
+ *   Pointer to failsafe private structure.
+ * @return
+ *   0 on success, negative errno value otherwise.
+ */
+static int
+fs_rx_event_proxy_service_install(struct fs_priv *priv)
+{
+	struct rte_service_spec service;
+
+	if (priv->rxp.sstate == SS_NO_SERVICE) {
+		/* Register the Rx event proxy service with the EAL. */
+		memset(&service, 0, sizeof(struct rte_service_spec));
+		snprintf(service.name, sizeof(service.name), "%s_service",
+			 priv->dev->device->name);
+		service.socket_id = priv->dev->data->numa_node;
+		service.callback = fs_rx_event_proxy_service;
+		service.callback_userdata = (void *)priv;
+		int32_t ret = rte_service_component_register(&service,
+							     &priv->rxp.sid);
+		if (ret) {
+			ERROR("service register() failed");
+			return -ENOEXEC;
+		}
+		rte_service_component_runstate_set(priv->rxp.sid, 1);
+		priv->rxp.sstate = SS_REGISTERED;
+	}
+	return 0;
+}
+
+/**
+ * Uninstall failsafe Rx event proxy service.
+ *
+ * @param priv
+ *   Pointer to failsafe private structure.
+ */
+static void
+fs_rx_event_proxy_service_uninstall(struct fs_priv *priv)
+{
+	/* Unregister the event service. */
+	if (priv->rxp.sstate != SS_NO_SERVICE) {
+		rte_service_runstate_set(priv->rxp.sid, 0);
+		rte_service_component_runstate_set(priv->rxp.sid, 0);
+		rte_service_component_unregister(priv->rxp.sid);
+		priv->rxp.sid = 0;
+		priv->rxp.sstate = SS_NO_SERVICE;
+	}
+}
+
+/**
+ * Install failsafe Rx event proxy subsystem.
+ * This is the way the failsafe PMD generates Rx events on behalf of its
+ * subdevices.
+ *
+ * @param priv
+ *   Pointer to failsafe private structure.
+ * @return
+ *   0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+fs_rx_event_proxy_install(struct fs_priv *priv)
+{
+	int rc = 0;
+
+	/* Create the epoll fd to wait on for Rx events from the subdevices. */
+	priv->rxp.efd = epoll_create1(0);
+	if (priv->rxp.efd < 0) {
+		rte_errno = errno;
+		ERROR("failed to create epoll,"
+		      " Rx interrupts will not be supported");
+		return -rte_errno;
+
+	}
+	/* allocate memory for receiving the Rx events from the subdevices. */
+	priv->rxp.evec = calloc(NUM_RX_PROXIES, sizeof(*priv->rxp.evec));
+	if (priv->rxp.evec == NULL) {
+		ERROR("failed to allocate memory for event vectors,"
+		      " Rx interrupts will not be supported");
+		rc = -ENOMEM;
+		goto error;
+	}
+	if (fs_rx_event_proxy_service_install(priv) < 0) {
+		rc = -rte_errno;
+		goto error;
+	}
+	return 0;
+error:
+	if (priv->rxp.efd >= 0) {
+		close(priv->rxp.efd);
+		priv->rxp.efd = -1;
+	}
+	if (priv->rxp.evec != NULL) {
+		free(priv->rxp.evec);
+		priv->rxp.evec = NULL;
+	}
+	rte_errno = -rc;
+	return rc;
+}
+
+/**
+ * Uninstall failsafe Rx event proxy.
+ *
+ * @param priv
+ *   Pointer to failsafe private structure.
+ */
+static void
+fs_rx_event_proxy_uninstall(struct fs_priv *priv)
+{
+	fs_rx_event_proxy_service_uninstall(priv);
+	if (priv->rxp.evec) {
+		free(priv->rxp.evec);
+		priv->rxp.evec = NULL;
+	}
+	if (priv->rxp.efd <= 0) {
+		WARN("epoll fd is invalid");
+	} else {
+		close(priv->rxp.efd);
+		priv->rxp.efd = -1;
+	}
+}
+
+/**
+ * Uninstall failsafe interrupt vector.
+ *
+ * @param priv
+ *   Pointer to failsafe private structure.
+ */
+static void
+fs_rx_intr_vec_uninstall(struct fs_priv *priv)
+{
+	struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+	if (intr_handle->intr_vec) {
+		free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+	intr_handle->nb_efd = 0;
+}
+
+/**
+ * Installs failsafe interrupt vector to be registered with EAL later on.
+ *
+ * @param priv
+ *   Pointer to failsafe private structure.
+ *
+ * @return
+ *   0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+fs_rx_intr_vec_install(struct fs_priv *priv)
+{
+	unsigned int i;
+	unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
+	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+	unsigned int count = 0;
+	struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+	/* Allocate the interrupt vector of the failsafe Rx proxy interrupts */
+	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+	if (intr_handle->intr_vec == NULL) {
+		fs_rx_intr_vec_uninstall(priv);
+		rte_errno = ENOMEM;
+		ERROR("failed to allocate memory for interrupt vector,"
+		      " Rx interrupts will not be supported");
+		return -rte_errno;
+	}
+	for (i = 0; i < n; i++) {
+		struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+		/* Skip queues that cannot request interrupts. */
+		if (!rxq || (rxq->event_fd < 0)) {
+			/* Use invalid intr_vec[] index to disable entry. */
+			intr_handle->intr_vec[i] =
+				RTE_INTR_VEC_RXTX_OFFSET +
+				RTE_MAX_RXTX_INTR_VEC_ID;
+			continue;
+		}
+		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+			rte_errno = E2BIG;
+			ERROR("too many Rx queues for interrupt vector size"
+			      " (%d), Rx interrupts cannot be enabled",
+			      RTE_MAX_RXTX_INTR_VEC_ID);
+			fs_rx_intr_vec_uninstall(priv);
+			return -rte_errno;
+		}
+		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+		intr_handle->efds[count] = rxq->event_fd;
+		count++;
+	}
+	if (!count)
+		fs_rx_intr_vec_uninstall(priv);
+	else
+		intr_handle->nb_efd = count;
+	return 0;
+}
+
+/**
+ * RX Interrupt control per port.
+ *
+ * @param sdev
+ *   Pointer to sub-device structure.
+ * @param op
+ *   The operation to be performed on the vector:
+ *   one of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+static int
+fs_eth_sdev_rx_intr_ctl(struct sub_device *sdev, int op)
+{
+	struct rte_eth_dev *dev = ETH(sdev);
+	struct rte_eth_dev *fsdev = sdev->fs_dev;
+	struct rxq *fsrxq;
+	uint16_t qid;
+	int epfd = PRIV(sdev->fs_dev)->rxp.efd;
+	uint16_t pid = PORT_ID(sdev);
+
+	if (epfd <= 0) {
+		ERROR("proxy events are not initialized");
+		return -EBADFD;
+	}
+	if (ETH(sdev)->data->nb_rx_queues > fsdev->data->nb_rx_queues) {
+		ERROR("subdevice has more Rx queues than the failsafe device,"
+		      " Rx interrupts will not be enabled");
+		return -E2BIG;
+	}
+	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+		fsrxq = fsdev->data->rx_queues[qid];
+		rte_eth_dev_rx_intr_ctl_q(pid, qid, epfd, op, (void *)fsrxq);
+	}
+	return 0;
+}
+
+/**
+ * Apply an Rx interrupt control operation on all subdevices.
+ *
+ * @param priv
+ *   Pointer to failsafe private data structure.
+ * @param op
+ *   The operation to perform:
+ *   one of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
+ * @return
+ *   0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+slaves_rx_intr_ctrl(struct fs_priv *priv, int op)
+{
+	struct sub_device *sdev;
+	uint8_t i;
+	int ret;
+	int rc = 0;
+
+	FOREACH_SUBDEV_STATE(sdev, i, priv->dev,
+			     op == RTE_INTR_EVENT_DEL ?
+			     DEV_UNDEFINED : DEV_ACTIVE) {
+		ret = fs_eth_sdev_rx_intr_ctl(sdev, op);
+		if (ret < 0)
+			rc = ret;
+	}
+	return rc;
+}
+
+/**
+ * Install the Rx interrupts subsystem for a subdevice.
+ * Used when a subdevice is added dynamically.
+ *
+ * @param sdev
+ *   Pointer to subdevice structure.
+ *
+ * @return
+ *   0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
+{
+	return fs_eth_sdev_rx_intr_ctl(sdev, RTE_INTR_EVENT_ADD);
+}
+
+/**
+ * Uninstall Rx interrupts subsystem for a subdevice.
+ * Used when a subdevice is removed dynamically.
+ *
+ * @param sdev
+ *   Pointer to subdevice structure.
+ */
+void
+failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev)
+{
+	fs_eth_sdev_rx_intr_ctl(sdev, RTE_INTR_EVENT_DEL);
+}
+
+/**
+ * Uninstall failsafe Rx interrupts subsystem.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+void
+failsafe_rx_intr_uninstall(struct rte_eth_dev *dev)
+{
+	struct fs_priv *priv = PRIV(dev);
+	struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+	slaves_rx_intr_ctrl(priv, RTE_INTR_EVENT_DEL);
+	fs_rx_event_proxy_uninstall(priv);
+	rte_intr_free_epoll_fd(intr_handle);
+	if (intr_handle->intr_vec) {
+		free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+	intr_handle->nb_efd = 0;
+}
+
+/**
+ * Install failsafe Rx interrupts subsystem.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+failsafe_rx_intr_install(struct rte_eth_dev *dev)
+{
+	struct fs_priv *priv = PRIV(dev);
+	const struct rte_intr_conf *const intr_conf =
+			&priv->dev->data->dev_conf.intr_conf;
+	int rc = 0;
+
+	failsafe_rx_intr_uninstall(dev);
+	if (!intr_conf->rxq)
+		return 0;
+	if (fs_rx_intr_vec_install(priv) < 0)
+		return -rte_errno;
+	if (fs_rx_event_proxy_install(priv) < 0) {
+		fs_rx_intr_vec_uninstall(priv);
+		return -rte_errno;
+	}
+	rc = slaves_rx_intr_ctrl(priv, RTE_INTR_EVENT_ADD);
+	if (rc) {
+		fs_rx_event_proxy_uninstall(priv);
+		fs_rx_intr_vec_uninstall(priv);
+		return rc;
+	}
+	return 0;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt disable.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Rx queue index.
+ *
+ * @return
+ *   0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+failsafe_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct rxq *rxq = dev->data->rx_queues[idx];
+	struct sub_device *sdev;
+	uint64_t u64;
+	uint8_t i;
+	int rc = 0;
+	int ret;
+
+	if (!rxq || (rxq->event_fd <= 0)) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	rxq->enable_events = 0;
+	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
+		if (ret && ret != -ENOTSUP && ret != -ENODEV)
+			rc = ret;
+	}
+	/* Clear pending events */
+	while (read(rxq->event_fd, &u64, sizeof(u64)) > 0)
+		;
+	if (rc)
+		rte_errno = -rc;
+	return rc;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt enable.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Rx queue index.
+ *
+ * @return
+ *   0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+failsafe_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct rxq *rxq = dev->data->rx_queues[idx];
+	struct sub_device *sdev;
+	uint8_t i;
+	int rc = 0;
+	int ret;
+
+	if (!rxq || (rxq->event_fd <= 0)) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	/* Let the proxy service run. */
+	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
+		rc = rte_service_runstate_set(PRIV(dev)->rxp.sid, 1);
+		if (rc) {
+			ERROR("Failed to run failsafe interrupt services.");
+			goto exit;
+		}
+		PRIV(dev)->rxp.sstate = SS_RUNNING;
+	}
+	rxq->enable_events = 1;
+	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
+		if (ret && ret != -ENOTSUP && ret != -ENODEV)
+			rc = ret;
+	}
+exit:
+	if (rc)
+		rte_errno = -rc;
+	return rc;
+}
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index e16a590..6626d93 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -32,6 +32,7 @@ 
  */
 
 #include <stdint.h>
+#include <unistd.h>
 
 #include <rte_debug.h>
 #include <rte_atomic.h>
@@ -167,6 +168,9 @@ 
 			return ret;
 		sdev->state = DEV_STARTED;
 	}
+	ret = failsafe_rx_intr_install(dev);
+	if (ret)
+		WARN("Rx interrupts are not supported");
 	if (PRIV(dev)->state < DEV_STARTED)
 		PRIV(dev)->state = DEV_STARTED;
 	fs_switch_dev(dev, NULL);
@@ -179,8 +183,10 @@ 
 	struct sub_device *sdev;
 	uint8_t i;
 
+	failsafe_rx_intr_uninstall(dev);
 	PRIV(dev)->state = DEV_STARTED - 1;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
+		failsafe_rx_intr_uninstall_subdevice(sdev);
 		rte_eth_dev_stop(PORT_ID(sdev));
 		sdev->state = DEV_STARTED - 1;
 	}
@@ -254,6 +260,8 @@ 
 	if (queue == NULL)
 		return;
 	rxq = queue;
+	if (rxq->event_fd > 0)
+		close(rxq->event_fd);
 	dev = rxq->priv->dev;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
 		SUBOPS(sdev, rx_queue_release)
@@ -294,6 +302,11 @@ 
 	rxq->info.conf = *rx_conf;
 	rxq->info.nb_desc = nb_rx_desc;
 	rxq->priv = PRIV(dev);
+	rxq->event_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+	if (rxq->event_fd < 0) {
+		ERROR("Rx event_fd error, %s", strerror(errno));
+		return -errno;
+	}
 	dev->data->rx_queues[rx_queue_id] = rxq;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
@@ -767,4 +780,6 @@ 
 	.mac_addr_add = fs_mac_addr_add,
 	.mac_addr_set = fs_mac_addr_set,
 	.filter_ctrl = fs_filter_ctrl,
+	.rx_queue_intr_enable = failsafe_rx_intr_enable,
+	.rx_queue_intr_disable = failsafe_rx_intr_disable,
 };
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
index d81cc3c..538fdc0 100644
--- a/drivers/net/failsafe/failsafe_private.h
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -34,6 +34,7 @@ 
 #ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
 #define _RTE_ETH_FAILSAFE_PRIVATE_H_
 
+#include <sys/eventfd.h>
 #include <sys/queue.h>
 
 #include <rte_atomic.h>
@@ -57,6 +58,13 @@ 
 #define FAILSAFE_MAX_ETHPORTS 2
 #define FAILSAFE_MAX_ETHADDR 128
 
+enum rxp_service_state {
+	SS_NO_SERVICE = 0,
+	SS_REGISTERED,
+	SS_READY,
+	SS_RUNNING,
+};
+
 /* TYPES */
 
 struct rxq {
@@ -65,10 +73,23 @@  struct rxq {
 	/* id of last sub_device polled */
 	uint8_t last_polled;
 	unsigned int socket_id;
+	int event_fd;
+	unsigned int enable_events:1;
 	struct rte_eth_rxq_info info;
 	rte_atomic64_t refcnt[];
 };
 
+struct rx_proxy {
+	/* epoll file descriptor */
+	int efd;
+	/* event vector to be used by epoll */
+	struct rte_epoll_event *evec;
+	/* rte service id */
+	uint32_t sid;
+	/* proxy service state */
+	enum rxp_service_state sstate;
+};
+
 struct txq {
 	struct fs_priv *priv;
 	uint16_t qid;
@@ -139,6 +160,7 @@  struct fs_priv {
 	uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
 	/* current capabilities */
 	struct rte_eth_dev_info infos;
+	struct rte_intr_handle intr_handle; /* Port interrupt handle. */
 	/*
 	 * Fail-safe state machine.
 	 * This level will be tracking state of the EAL and eth
@@ -151,8 +173,28 @@  struct fs_priv {
 	unsigned int pending_alarm:1; /* An alarm is pending */
 	/* flow isolation state */
 	int flow_isolated:1;
+	/*
+	 * Rx interrupts/events proxy.
+	 * The PMD issues Rx events to the application on behalf of its
+	 * subdevices by registering its own event queues (one per failsafe
+	 * Rx queue) with the EAL. A PMD service thread listens for the Rx
+	 * events of all the subdevices; when a subdevice Rx queue raises an
+	 * event, the service catches it and delivers it to the corresponding
+	 * failsafe event queue.
+	 */
+	struct rx_proxy rxp;
 };
 
+/* FAILSAFE_INTR */
+
+int failsafe_rx_intr_install(struct rte_eth_dev *dev);
+void failsafe_rx_intr_uninstall(struct rte_eth_dev *dev);
+int failsafe_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
+int failsafe_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
+int failsafe_rx_intr_install_subdevice(struct sub_device *sdev);
+void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev);
+
 /* MISC */
 
 int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev);