[dpdk-dev] [PATCH 02/11] ip_pipeline: added config checks

Maciej Gajdzica maciejx.t.gajdzica at intel.com
Fri May 29 17:43:09 CEST 2015


After loading the configuration from a file, its integrity is checked:
valid CPU sockets for mempools and SWQs, a non-empty port mask, in-range
link filter queues, contiguous link and queue IDs, power-of-2 queue sizes,
burst sizes not exceeding queue sizes, exactly one reader and one writer
per queue, and valid TM and source references.

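The check is intended to run right after configuration parsing and before
initialization; it calls rte_panic() on the first inconsistency found and
returns 0 otherwise. A minimal sketch of the call flow (as wired up in
main.c below):

	struct app_params app;

	app_config_parse(&app, app.config_file);
	app_config_check(&app);	/* panics on invalid configuration */
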
Signed-off-by: Jasvinder Singh <jasvinder.singh at intel.com>
---
 examples/ip_pipeline/Makefile       |    1 +
 examples/ip_pipeline/config_check.c |  617 +++++++++++++++++++++++++++++++++++
 examples/ip_pipeline/main.c         |    2 +
 3 files changed, 620 insertions(+)
 create mode 100644 examples/ip_pipeline/config_check.c

diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index 2f224cc..c893952 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -44,6 +44,7 @@ APP = ip_pipeline
 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
 SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_check.c
 #SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += init.c
 SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += cpu_core_map.c
 #SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += cmdline.c
diff --git a/examples/ip_pipeline/config_check.c b/examples/ip_pipeline/config_check.c
new file mode 100644
index 0000000..bf400f4
--- /dev/null
+++ b/examples/ip_pipeline/config_check.c
@@ -0,0 +1,617 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
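+/*
+ * Static sanity checks of the parsed application configuration.
+ * Inconsistencies are reported through rte_panic() / APP_CHECK().
+ */
+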
+#include <stdio.h>
+
+#include "app.h"
+
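+/* Number of valid RX hardware queues (HWQ in) configured for a link */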
+static uint32_t
+app_link_get_n_hwq_in(struct app_params *app, uint32_t link_id)
+{
+	uint32_t hwq_id_max = 0, i;
+
+	for (i = 0; i < APP_LINK_MAX_HWQ_IN; i++) {
+		struct app_pktq_hwq_in_params *p =
+			&app->hwq_in_params[APP_HWQ_IN_IDX(link_id, i)];
+
+		if (APP_PARAM_VALID(p))
+			hwq_id_max++;
+	}
+
+	return hwq_id_max;
+}
+
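+/* Number of valid TX hardware queues (HWQ out) configured for a link */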
+static uint32_t
+app_link_get_n_hwq_out(struct app_params *app, uint32_t link_id)
+{
+	uint32_t hwq_id_max = 0, i;
+
+	for (i = 0; i < APP_LINK_MAX_HWQ_OUT; i++) {
+		struct app_pktq_hwq_out_params *p = &app->hwq_out_params[link_id
+				* APP_LINK_MAX_HWQ_OUT + i];
+
+		if (APP_PARAM_VALID(p))
+			hwq_id_max++;
+	}
+
+	return hwq_id_max;
+}
+
+static uint32_t
+app_link_get_hwq_in_readers(uint32_t link_id, uint32_t hwq_id)
+{
+	uint32_t j, n;
+
+	for (n = 0, j = 0; j < APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN; j++) {
+		uint32_t linkId = j / APP_LINK_MAX_HWQ_IN;
+		uint32_t q_id = j % APP_LINK_MAX_HWQ_IN;
+
+		if ((linkId == link_id) && (q_id == hwq_id))
+			n++;
+	}
+
+	return n;
+}
+
+static uint32_t
+app_link_get_hwq_out_writers(uint32_t link_id, uint32_t hwq_id)
+{
+	uint32_t j, n;
+
+	for (n = 0, j = 0; j < APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT; j++) {
+		uint32_t linkId = j / APP_LINK_MAX_HWQ_OUT;
+		uint32_t q_id = j % APP_LINK_MAX_HWQ_OUT;
+
+		if ((linkId == link_id) && (q_id == hwq_id))
+			n++;
+	}
+
+	return n;
+}
+
+static uint32_t
+app_pipeline_hwq_in_get_n(struct app_params *app, uint32_t hwq_id)
+{
+	uint32_t i, j, n;
+
+	for (n = 0, i = 0; i < app->n_pipelines; i++) {
+		if (!APP_PARAM_VALID(&app->pipeline_params[i]))
+			continue;
+
+		for (j = 0; j < app->pipeline_params[i].n_pktq_in; j++) {
+			if ((app->pipeline_params[i].pktq_in[j].type == APP_PKTQ_IN_HWQ) &&
+				(app->pipeline_params[i].pktq_in[j].id == hwq_id))
+				n++;
+		}
+	}
+
+	return n;
+}
+
+static uint32_t
+app_pipeline_hwq_out_get_n(struct app_params *app, uint32_t hwq_id)
+{
+	uint32_t i, j, n;
+
+	for (n = 0, i = 0; i < app->n_pipelines; i++) {
+		if (!APP_PARAM_VALID(&app->pipeline_params[i]))
+			continue;
+
+		for (j = 0; j < app->pipeline_params[i].n_pktq_out; j++) {
+			if ((app->pipeline_params[i].pktq_out[j].type == APP_PKTQ_OUT_HWQ) &&
+				(app->pipeline_params[i].pktq_out[j].id == hwq_id))
+				n++;
+		}
+	}
+
+	return n;
+}
+
+static uint32_t
+app_swq_in_get_readers(struct app_params *app, uint32_t swq_id)
+{
+	uint32_t i, j, n;
+
+	for (n = 0, i = 0; i < app->n_pipelines; i++) {
+		if (!APP_PARAM_VALID(&app->pipeline_params[i]))
+			continue;
+
+		for (j = 0; j < app->pipeline_params[i].n_pktq_in; j++) {
+			if ((app->pipeline_params[i].pktq_in[j].type == APP_PKTQ_IN_SWQ) &&
+				(app->pipeline_params[i].pktq_in[j].id == swq_id))
+				n++;
+		}
+	}
+
+	return n;
+}
+
+static uint32_t
+app_swq_out_get_writers(struct app_params *app, uint32_t swq_id)
+{
+	uint32_t i, j, n;
+
+	for (n = 0, i = 0; i < app->n_pipelines; i++) {
+		if (!APP_PARAM_VALID(&app->pipeline_params[i]))
+			continue;
+
+		for (j = 0; j < app->pipeline_params[i].n_pktq_out; j++) {
+			if ((app->pipeline_params[i].pktq_out[j].type == APP_PKTQ_OUT_SWQ) &&
+				(app->pipeline_params[i].pktq_out[j].id == swq_id))
+				n++;
+		}
+	}
+
+	return n;
+}
+
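+/* Number of CPU sockets spanned by the enabled lcores */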
+static uint32_t
+app_cpu_socket_count(void)
+{
+	uint32_t lcore_id, cpu_socket_id_max = 0;
+
+	/* Enabled lcore IDs are not necessarily contiguous */
+	RTE_LCORE_FOREACH(lcore_id) {
+		uint32_t cpu_socket_id = rte_lcore_to_socket_id(lcore_id);
+
+		if (cpu_socket_id > cpu_socket_id_max)
+			cpu_socket_id_max = cpu_socket_id;
+	}
+
+	return cpu_socket_id_max + 1;
+}
+
+static uint32_t
+app_mempool_cpu_socket_id(uint32_t mempool_socket_id)
+{
+	uint32_t i, n;
+	uint32_t n_cpu_socket = app_cpu_socket_count();
+
+	for (n = 0, i = 0; i < n_cpu_socket; i++) {
+		if (mempool_socket_id == i)
+			n++;
+	}
+
+	return n;
+}
+
+static uint32_t
+app_swq_cpu_socket_id(uint32_t swq_socket_id)
+{
+	uint32_t i, n;
+	uint32_t n_cpu_socket = app_cpu_socket_count();
+
+	for (n = 0, i = 0; i < n_cpu_socket; i++) {
+		if (swq_socket_id == i)
+			n++;
+	}
+
+	return n;
+}
+
+static void
+check_mempools_params(struct app_params *app)
+{
+	uint32_t i;
+
+	/* Valid CPU socket ID */
+	for (i = 0; i < app->n_mempools; i++) {
+		struct app_mempool_params *mempool = &app->mempool_params[i];
+
+		if (app_mempool_cpu_socket_id(mempool->cpu_socket_id) == 0)
+			rte_panic("Mempool %u is allocated on invalid CPU socket\n",
+				i);
+	}
+}
+
+static void
+check_port_mask(struct app_params *app)
+{
+	if (app->port_mask == 0)
+		rte_panic("There are no ports to use.\n");
+}
+
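+/* A link filter queue index must fall within the configured RX queues */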
+#define CHECK_LINKS_PARAMS_QUEUE(queue, max) \
+	APP_CHECK(link->queue <= (max), \
+		"Link %u: %s = %u out of defined queue range 0 - %u", \
+		i, # queue, link->queue, (max))
+
+static void
+check_links_params(struct app_params *app)
+{
+	uint32_t i;
+
+	for (i = 0; i < app->n_links; i++) {
+		struct app_link_params *link = &app->link_params[i];
+		uint32_t n_hwq_in;
+
+		if (!APP_PARAM_VALID(link))
+			continue;
+
+		/* Link filter queues must point to configured RX queues */
+		n_hwq_in = app_link_get_n_hwq_in(app, i);
+
+		if (n_hwq_in > 0) {
+			CHECK_LINKS_PARAMS_QUEUE(arp_q, n_hwq_in - 1);
+			CHECK_LINKS_PARAMS_QUEUE(ip_local_q, n_hwq_in - 1);
+			CHECK_LINKS_PARAMS_QUEUE(tcp_local_q, n_hwq_in - 1);
+			CHECK_LINKS_PARAMS_QUEUE(udp_local_q, n_hwq_in - 1);
+			CHECK_LINKS_PARAMS_QUEUE(sctp_local_q, n_hwq_in - 1);
+		}
+	}
+}
+#undef CHECK_LINKS_PARAMS_QUEUE
+
+static void
+check_links_contiguous_id(struct app_params *app)
+{
+	uint32_t i;
+	int status = 0;
+
+	/* All link IDs must be contiguous */
+	for (i = 0; i < app->n_links; i++) {
+		struct app_link_params *link = &app->link_params[i];
+
+		if (status == 0) {
+			if (!APP_PARAM_VALID(link))
+				status = 1;
+		} else {
+			if (APP_PARAM_VALID(link))
+				rte_panic("Link IDs are not contiguous (gap before link %u)\n",
+					i);
+		}
+	}
+}
+
+static void
+check_link_hwq_read_write(struct app_params *app)
+{
+	uint32_t i;
+
+	for (i = 0; i < app->n_links; i++) {
+		uint32_t n_hwq_in, n_hwq_out, j;
+
+		if (!APP_PARAM_VALID(&app->link_params[i]))
+			continue;
+
+		n_hwq_in = app_link_get_n_hwq_in(app, i);
+		n_hwq_out = app_link_get_n_hwq_out(app, i);
+
+		for (j = 0; j < n_hwq_in; j++) {
+			uint32_t p = app_link_get_hwq_in_readers(i, j);
+
+			if (p == 0)
+				rte_panic("Link %u: HWQ in %u has no reader\n",
+					i, j);
+			if (p > 1)
+				rte_panic("Link %u: HWQ in %u has more than one reader\n",
+					i, j);
+		}
+
+		for (j = 0; j < n_hwq_out; j++) {
+			uint32_t p = app_link_get_hwq_out_writers(i, j);
+
+			if (p == 0)
+				rte_panic("Link %u: HWQ out %u has no writer\n",
+					i, j);
+			if (p > 1)
+				rte_panic("Link %u: HWQ out %u has more than one writer\n",
+					i, j);
+		}
+	}
+}
+
+static void
+check_hwq_in_params(struct app_params *app)
+{
+	uint32_t i;
+
+	for (i = 0; i < app->n_pktq_hwq_in; i++) {
+		uint32_t mempool_id = app->hwq_in_params[i].mempool_id;
+
+		if (!APP_PARAM_VALID(&app->hwq_in_params[i]))
+			continue;
+
+		/* Mempool ID must refer to a valid mempool */
+		if (!APP_PARAM_VALID(&app->mempool_params[mempool_id]))
+			rte_panic("HWQ in %u has invalid mempool\n", i);
+
+		/* Queue size must be a non-zero power of 2 */
+		if ((app->hwq_in_params[i].size == 0) ||
+			(rte_is_power_of_2(app->hwq_in_params[i].size) == 0))
+			rte_panic("HWQ in %u has invalid size\n", i);
+
+		/* Burst size must not exceed queue size */
+		if (app->hwq_in_params[i].burst > app->hwq_in_params[i].size)
+			rte_panic("HWQ in %u has burst size greater than queue size\n",
+				i);
+	}
+}
+
+static void
+check_hwq_in_contiguous_id(struct app_params *app)
+{
+	uint32_t i;
+	int status = 0;
+
+	for (i = 0; i < app->n_pktq_hwq_in; i++) {
+		if (status == 0) {
+			if (!APP_PARAM_VALID(&app->hwq_in_params[i]))
+				status = 1;
+		} else {
+			if (APP_PARAM_VALID(&app->hwq_in_params[i]))
+				rte_panic("HWQ in IDs are not contiguous (gap before HWQ in %u)\n",
+					i);
+		}
+	}
+}
+
+static void
+check_hwq_in_pipeline(struct app_params *app)
+{
+	uint32_t i, n;
+
+	for (i = 0; i < app->n_pktq_hwq_in; i++) {
+		if (!APP_PARAM_VALID(&app->hwq_in_params[i]))
+			continue;
+
+		n = app_pipeline_hwq_in_get_n(app, i);
+		if (n == 0)
+			rte_panic("HWQ in %u is not read by any pipeline\n", i);
+		if (n > 1)
+			rte_panic("HWQ in %u has more than one reader\n", i);
+	}
+}
+
+static void
+check_hwq_out_params(struct app_params *app)
+{
+	uint32_t i;
+
+	for (i = 0; i < app->n_pktq_hwq_out; i++) {
+		if (!APP_PARAM_VALID(&app->hwq_out_params[i]))
+			continue;
+
+		/* Queue size must be a non-zero power of 2 */
+		if ((app->hwq_out_params[i].size == 0) ||
+			(rte_is_power_of_2(app->hwq_out_params[i].size) == 0))
+			rte_panic("HWQ out %u has invalid size\n", i);
+
+		/* Burst size must not exceed queue size */
+		if (app->hwq_out_params[i].burst > app->hwq_out_params[i].size)
+			rte_panic("HWQ out %u has burst size greater than queue size\n",
+				i);
+	}
+}
+
+static void
+check_hwq_out_contiguous_id(struct app_params *app)
+{
+	uint32_t i;
+	int status = 0;
+
+	for (i = 0; i < app->n_pktq_hwq_out; i++) {
+		if (status == 0) {
+			if (!APP_PARAM_VALID(&app->hwq_out_params[i]))
+				status = 1;
+		} else {
+			if (APP_PARAM_VALID(&app->hwq_out_params[i]))
+				rte_panic("HWQ out IDs are not contiguous (gap before HWQ out %u)\n",
+					i);
+		}
+	}
+}
+
+static void
+check_hwq_out_pipeline(struct app_params *app)
+{
+	uint32_t i, n;
+
+	for (i = 0; i < app->n_pktq_hwq_out; i++) {
+		if (!APP_PARAM_VALID(&app->hwq_out_params[i]))
+			continue;
+
+		n = app_pipeline_hwq_out_get_n(app, i);
+		if (n == 0)
+			rte_panic("HWQ out %u is not written by any pipeline\n", i);
+		if (n > 1)
+			rte_panic("HWQ out %u has more than one writer\n", i);
+	}
+}
+
+static void
+check_swq_params(struct app_params *app)
+{
+	uint32_t i;
+
+	for (i = 0; i < app->n_pktq_swq; i++) {
+		struct app_pktq_swq_params *swq = &app->swq_params[i];
+
+		/* Queue size must be a non-zero power of 2 */
+		if ((swq->size == 0) ||
+			(rte_is_power_of_2(swq->size) == 0))
+			rte_panic("SWQ %u has invalid size\n", i);
+
+		/* Burst sizes must not exceed queue size */
+		if (swq->burst_read > swq->size)
+			rte_panic("SWQ %u has burst read size greater than queue size\n",
+				i);
+		if (swq->burst_write > swq->size)
+			rte_panic("SWQ %u has burst write size greater than queue size\n",
+				i);
+
+		/* CPU socket ID must be valid */
+		if (app_swq_cpu_socket_id(swq->cpu_socket_id) == 0)
+			rte_panic("SWQ %u is allocated on invalid CPU socket\n",
+				i);
+	}
+}
+
+static void
+check_swq_contiguous_id(struct app_params *app)
+{
+	uint32_t i;
+	int status = 0;
+
+	for (i = 0; i < app->n_pktq_swq; i++) {
+		if (status == 0) {
+			if (!APP_PARAM_VALID(&app->swq_params[i]))
+				status = 1;
+		} else {
+			if (APP_PARAM_VALID(&app->swq_params[i]))
+				rte_panic("SWQ IDs are not contiguous (gap before SWQ %u)\n",
+					i);
+		}
+	}
+}
+
+static void
+check_swq_read_write(struct app_params *app)
+{
+	uint32_t i, n;
+
+	for (i = 0; i < app->n_pktq_swq; i++) {
+		/* Readers */
+		n = app_swq_in_get_readers(app, i);
+		if (n == 0)
+			rte_panic("SWQ %u has no reader\n", i);
+		if (n > 1)
+			rte_panic("SWQ %u has more than one reader\n", i);
+
+		/* Writers */
+		n = app_swq_out_get_writers(app, i);
+		if (n == 0)
+			rte_panic("SWQ %u has no writer\n", i);
+		if (n > 1)
+			rte_panic("SWQ %u has more than one writer\n", i);
+	}
+}
+
+static void
+check_tm_params(struct app_params *app)
+{
+	uint32_t i;
+
+	for (i = 0; i < app->n_pktq_tm; i++) {
+		uint32_t link_id;
+
+		/* TM name must be "TM<link_id>" and refer to a valid link */
+		if (sscanf(app->tm_params[i].name, "TM%u", &link_id) != 1)
+			rte_panic("TM %u has an invalid name\n", i);
+
+		if ((link_id >= APP_MAX_LINKS) ||
+			!APP_PARAM_VALID(&app->link_params[link_id]))
+			rte_panic("TM %u does not match a valid link\n", i);
+	}
+}
+
+static void
+check_source_params(struct app_params *app)
+{
+	uint32_t i;
+
+	for (i = 0; i < app->n_pktq_source; i++) {
+		uint32_t mempool_id = app->source_params[i].mempool_id;
+
+		if (!APP_PARAM_VALID(&app->mempool_params[mempool_id]))
+			rte_panic("Source %u does not have a valid mempool\n", i);
+	}
+}
+
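+/* Top-level configuration check, called right after app_config_parse() */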
+int
+app_config_check(struct app_params *app)
+{
+	/* Mempools */
+	check_mempools_params(app);
+
+	/* NIC ports / links */
+	check_port_mask(app);
+
+	/* Link parameters */
+	check_links_params(app);
+	check_links_contiguous_id(app);
+	check_link_hwq_read_write(app);
+
+	/* HWQ in */
+	check_hwq_in_params(app);
+	check_hwq_in_contiguous_id(app);
+	check_hwq_in_pipeline(app);
+
+	/* HWQ out */
+	check_hwq_out_params(app);
+	check_hwq_out_contiguous_id(app);
+	check_hwq_out_pipeline(app);
+
+	/* SWQ */
+	check_swq_params(app);
+	check_swq_contiguous_id(app);
+	check_swq_read_write(app);
+
+	/* TM */
+	check_tm_params(app);
+
+	/* Source */
+	check_source_params(app);
+
+	return 0;
+}
diff --git a/examples/ip_pipeline/main.c b/examples/ip_pipeline/main.c
index a2d7ef0..612eea9 100644
--- a/examples/ip_pipeline/main.c
+++ b/examples/ip_pipeline/main.c
@@ -47,5 +47,7 @@ main(int argc, char **argv)
 
 	app_config_parse(&app, app.config_file);
 
+	app_config_check(&app);
+
 	return 0;
 }
-- 
1.7.9.5


