[dpdk-dev,3/6] net/e1000: implement descriptor status API (igb)
Checks
Commit Message
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
drivers/net/e1000/e1000_ethdev.h | 5 +++++
drivers/net/e1000/igb_ethdev.c | 2 ++
drivers/net/e1000/igb_rxtx.c | 46 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+)
Comments
Hi Olivier,
> -----Original Message-----
> From: Olivier Matz [mailto:olivier.matz@6wind.com]
> Sent: Thursday, March 2, 2017 1:19 AM
> To: dev@dpdk.org; thomas.monjalon@6wind.com; Ananyev, Konstantin; Lu,
> Wenzhuo; Zhang, Helin; Wu, Jingjing; adrien.mazarguil@6wind.com;
> nelio.laranjeiro@6wind.com
> Cc: Yigit, Ferruh; Richardson, Bruce
> Subject: [PATCH 3/6] net/e1000: implement descriptor status API (igb)
>
> Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> +
> +int
> +eth_igb_tx_descriptor_status(struct rte_eth_dev *dev, uint16_t tx_queue_id,
> + uint16_t offset)
> +{
> + volatile uint32_t *status;
> + struct igb_tx_queue *txq;
> + uint32_t desc;
> +
> + txq = dev->data->tx_queues[tx_queue_id];
> + if (unlikely(offset >= txq->nb_tx_desc))
> + return -EINVAL;
> +
> + desc = txq->tx_tail + offset;
Should we check desc against nb_tx_desc here and wrap it around the ring, like the Rx function does? The same for em.
> + status = &txq->tx_ring[desc].wb.status;
> + if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
> + return RTE_ETH_TX_DESC_DONE;
> +
> + return RTE_ETH_TX_DESC_FULL;
> +}
> +
> void
> igb_dev_clear_queues(struct rte_eth_dev *dev) {
> --
> 2.8.1
Hi Wenzhuo,
On Thu, 2 Mar 2017 01:28:21 +0000, "Lu, Wenzhuo" <wenzhuo.lu@intel.com> wrote:
> Hi Olivier,
>
> > -----Original Message-----
> > From: Olivier Matz [mailto:olivier.matz@6wind.com]
> > Sent: Thursday, March 2, 2017 1:19 AM
> > To: dev@dpdk.org; thomas.monjalon@6wind.com; Ananyev, Konstantin; Lu,
> > Wenzhuo; Zhang, Helin; Wu, Jingjing; adrien.mazarguil@6wind.com;
> > nelio.laranjeiro@6wind.com
> > Cc: Yigit, Ferruh; Richardson, Bruce
> > Subject: [PATCH 3/6] net/e1000: implement descriptor status API (igb)
> >
> > Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> > +
> > +int
> > +eth_igb_tx_descriptor_status(struct rte_eth_dev *dev, uint16_t tx_queue_id,
> > + uint16_t offset)
> > +{
> > + volatile uint32_t *status;
> > + struct igb_tx_queue *txq;
> > + uint32_t desc;
> > +
> > + txq = dev->data->tx_queues[tx_queue_id];
> > + if (unlikely(offset >= txq->nb_tx_desc))
> > + return -EINVAL;
> > +
> > + desc = txq->tx_tail + offset;
> Should we check nb_tx_desc here? The same for em.
Correct, thanks.
Olivier
@@ -311,6 +311,11 @@ uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int eth_igb_rx_descriptor_status(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id, uint16_t offset);
+int eth_igb_tx_descriptor_status(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id, uint16_t offset);
+
int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -406,6 +406,8 @@ static const struct eth_dev_ops eth_igb_ops = {
.rx_queue_release = eth_igb_rx_queue_release,
.rx_queue_count = eth_igb_rx_queue_count,
.rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .rx_descriptor_status = eth_igb_rx_descriptor_status,
+ .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.dev_led_on = eth_igb_led_on,
@@ -1606,6 +1606,52 @@ eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
}
+/*
+ * Check the status of a descriptor in an Rx queue (igb).
+ *
+ * @param dev         ethdev whose Rx queue is inspected.
+ * @param rx_queue_id index into dev->data->rx_queues (assumed valid;
+ *                    not range-checked here — TODO confirm callers
+ *                    validate it, as rte_ethdev does).
+ * @param offset      descriptor position relative to the next one the
+ *                    driver will process (rx_tail).
+ * @return RTE_ETH_RX_DESC_USED, _DONE, _AVAIL, or -EINVAL if offset
+ *         is not within the ring.
+ */
+int
+eth_igb_rx_descriptor_status(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t offset)
+{
+ volatile uint32_t *status;
+ struct igb_rx_queue *rxq;
+ uint32_t desc;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ /*
+ * Descriptors in the "held" region have been processed by the
+ * driver but not yet refilled/given back to hardware: report USED.
+ */
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_USED;
+
+ /* Translate the relative offset to a ring index, wrapping around. */
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ /*
+ * DD (Descriptor Done) set in the write-back status word means the
+ * hardware has filled this descriptor; otherwise it is still owned
+ * by hardware and available for an incoming packet.
+ */
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+/*
+ * Check the status of a descriptor in a Tx queue (igb).
+ *
+ * @param dev         ethdev whose Tx queue is inspected.
+ * @param tx_queue_id index into dev->data->tx_queues (assumed valid;
+ *                    not range-checked here — TODO confirm callers
+ *                    validate it, as rte_ethdev does).
+ * @param offset      descriptor position relative to tx_tail (the next
+ *                    descriptor the driver will write).
+ * @return RTE_ETH_TX_DESC_DONE, RTE_ETH_TX_DESC_FULL, or -EINVAL if
+ *         offset is not within the ring.
+ */
+int
+eth_igb_tx_descriptor_status(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t offset)
+{
+ volatile uint32_t *status;
+ struct igb_tx_queue *txq;
+ uint32_t desc;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ /*
+ * Translate the relative offset to a ring index; tx_tail + offset
+ * may run past the end of the ring, so wrap it around (same scheme
+ * as eth_igb_rx_descriptor_status). Without this, reading
+ * tx_ring[desc] goes out of bounds whenever
+ * offset >= nb_tx_desc - tx_tail.
+ */
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+
+ /* DD set in the write-back status word: hardware finished this one. */
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{