[PATCH v7 09/17] graph: add structure for stream moving between cores
Yan, Zhirun
zhirun.yan at intel.com
Tue Jun 6 06:30:49 CEST 2023
> -----Original Message-----
> From: Jerin Jacob <jerinjacobk at gmail.com>
> Sent: Monday, June 5, 2023 8:47 PM
> To: Yan, Zhirun <zhirun.yan at intel.com>
> Cc: dev at dpdk.org; jerinj at marvell.com; kirankumark at marvell.com;
> ndabilpuram at marvell.com; stephen at networkplumber.org;
> pbhagavatula at marvell.com; Liang, Cunming <cunming.liang at intel.com>; Wang,
> Haiyue <haiyue.wang at intel.com>; mattias.ronnblom
> <mattias.ronnblom at ericsson.com>
> Subject: Re: [PATCH v7 09/17] graph: add structure for stream moving between
> cores
>
> On Mon, Jun 5, 2023 at 4:56 PM Zhirun Yan <zhirun.yan at intel.com> wrote:
> >
> > Add graph_mcore_dispatch_wq_node to hold graph scheduling workqueue
> > node.
> >
> > Signed-off-by: Haiyue Wang <haiyue.wang at intel.com>
> > Signed-off-by: Cunming Liang <cunming.liang at intel.com>
> > Signed-off-by: Zhirun Yan <zhirun.yan at intel.com>
> > ---
> > lib/graph/graph.c | 2 ++
> > lib/graph/graph_populate.c | 1 +
> > lib/graph/graph_private.h | 12 ++++++++++++
> > lib/graph/rte_graph_worker_common.h | 23 +++++++++++++++++++++++
> > 4 files changed, 38 insertions(+)
> >
> > diff --git a/lib/graph/graph.c b/lib/graph/graph.c
> > index 8ce87ae6da..9f107db425 100644
> > --- a/lib/graph/graph.c
> > +++ b/lib/graph/graph.c
> > @@ -289,6 +289,7 @@ rte_graph_model_mcore_dispatch_core_bind(rte_graph_t id, int lcore)
> >
> >         RTE_ASSERT(graph->graph->model == RTE_GRAPH_MODEL_MCORE_DISPATCH);
> >         graph->lcore_id = lcore;
> > +       graph->graph->lcore_id = graph->lcore_id;
> >         graph->socket = rte_lcore_to_socket_id(lcore);
> >
> >         /* check the availability of source node */
> > @@ -312,6 +313,7 @@ rte_graph_model_mcore_dispatch_core_unbind(rte_graph_t id)
> > break;
> >
> > graph->lcore_id = RTE_MAX_LCORE;
> > + graph->graph->lcore_id = RTE_MAX_LCORE;
> >
> > fail:
> > return;
> > diff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c
> > index 2c0844ce92..ed596a7711 100644
> > --- a/lib/graph/graph_populate.c
> > +++ b/lib/graph/graph_populate.c
> > @@ -89,6 +89,7 @@ graph_nodes_populate(struct graph *_graph)
> > }
> > node->id = graph_node->node->id;
> > node->parent_id = pid;
> > + node->dispatch.lcore_id = graph_node->node->lcore_id;
> > nb_edges = graph_node->node->nb_edges;
> > node->nb_edges = nb_edges;
> >                 off += sizeof(struct rte_node);
> > diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
> > index 354dc8ac0a..d84174b667 100644
> > --- a/lib/graph/graph_private.h
> > +++ b/lib/graph/graph_private.h
> > @@ -64,6 +64,18 @@ struct node {
> >         char next_nodes[][RTE_NODE_NAMESIZE]; /**< Names of next nodes. */
> > };
> >
> > +/**
> > + * @internal
> > + *
> > + * Structure that holds the graph scheduling workqueue node stream.
> > + * Used for mcore dispatch model.
> > + */
> > +struct graph_mcore_dispatch_wq_node {
> > + rte_graph_off_t node_off;
> > + uint16_t nb_objs;
> > +       void *objs[RTE_GRAPH_BURST_SIZE];
> > +} __rte_cache_aligned;
> > +
> > /**
> > * @internal
> > *
> > diff --git a/lib/graph/rte_graph_worker_common.h
> > b/lib/graph/rte_graph_worker_common.h
> > index 72d132bae4..00bcf47ee8 100644
> > --- a/lib/graph/rte_graph_worker_common.h
> > +++ b/lib/graph/rte_graph_worker_common.h
> > @@ -39,6 +39,13 @@ enum rte_graph_worker_model {
> >         /**< Dispatch model to support cross-core dispatching within core affinity. */
> > };
> >
> > +/**
> > + * @internal
> > + *
> > + * Singly-linked list head for graph schedule run-queue.
> > + */
> > +SLIST_HEAD(rte_graph_rq_head, rte_graph);
> > +
> > /**
> > * @internal
> > *
> > @@ -50,6 +57,15 @@ struct rte_graph {
> > uint32_t cir_mask; /**< Circular buffer wrap around mask. */
> > rte_node_t nb_nodes; /**< Number of nodes in the graph. */
> >         rte_graph_off_t *cir_start;     /**< Pointer to circular buffer. */
>
> Please add comment here, End of Fast path variables.
Got it.
>
>
> > + /* Graph schedule */
> > + struct rte_graph_rq_head *rq __rte_cache_aligned; /* The run-queue */
> > +       struct rte_graph_rq_head rq_head; /* The head for run-queue list */
> > +
> > + SLIST_ENTRY(rte_graph) rq_next; /* The next for run-queue list */
> > + unsigned int lcore_id; /**< The graph running Lcore. */
> > + struct rte_ring *wq; /**< The work-queue for pending streams. */
> > + struct rte_mempool *mp; /**< The mempool for scheduling streams. */
> > + /* Graph schedule area */
>
> Please move above sections to _dispatch_ union.
Yes, will change it to union.
>
> >         rte_graph_off_t nodes_start; /**< Offset at which node memory starts. */
> > rte_graph_t id; /**< Graph identifier. */
> > int socket; /**< Socket ID where memory is allocated. */
> > @@ -84,6 +100,13 @@ struct rte_node {
> > /** Original process function when pcap is enabled. */
> > rte_node_process_t original_process;
> >
> > + RTE_STD_C11
> > + union {
> > + /* Fast schedule area for mcore dispatch model */
> > + struct {
> > + unsigned int lcore_id; /**< Node running lcore. */
> > + } dispatch;
> > + };
> > /* Fast path area */
> > #define RTE_NODE_CTX_SZ 16
> >         uint8_t ctx[RTE_NODE_CTX_SZ] __rte_cache_aligned; /**< Node Context. */
> > --
> > 2.37.2
> >
More information about the dev
mailing list