[v3,8/8] ml/cnxk: reduce levels of nested variables access

Message ID 20230316212904.9318-9-syalavarthi@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Thomas Monjalon
Series Fixes to ml/cnxk driver

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/github-robot: build success github build: passed
ci/iol-broadcom-Performance fail Performance Testing issues
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/intel-Testing success Testing PASS
ci/intel-Functional fail Functional issues
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS

Commit Message

Srikanth Yalavarthi March 16, 2023, 9:29 p.m. UTC
  Reduce the number of levels needed to access nested structure
variables. Use available local variables, or add new local pointer
variables, to keep the access pattern uniform (see the sketch after
the diffstat below).

Fixes: 298b2af4267f ("ml/cnxk: add internal structures for derived info")
Fixes: 0b9c0768ce2b ("ml/cnxk: support model query")

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 drivers/ml/cnxk/cn10k_ml_model.c | 48 ++++++++++++++++----------------
 1 file changed, 24 insertions(+), 24 deletions(-)
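
For context, a minimal, self-contained sketch of the pattern this patch applies:
caching a pointer to the nested metadata in a local variable so that each field
access drops one level of indirection. The structures below (shape, io_desc,
meta, model) are illustrative stand-ins, not the driver's actual definitions.

#include <stdio.h>
#include <string.h>

/* Hypothetical structures standing in for the driver's nested layout. */
struct shape {
	int w, x, y, z;
};

struct io_desc {
	char name[32];
	struct shape shape;
};

struct meta {
	int num_input;
	struct io_desc input[4];
};

struct model {
	struct meta metadata;
};

/* Before: every access walks model->metadata.input[i].shape.* */
static int
nb_elements_nested(const struct model *model, int i)
{
	return model->metadata.input[i].shape.w * model->metadata.input[i].shape.x *
	       model->metadata.input[i].shape.y * model->metadata.input[i].shape.z;
}

/* After: a local pointer to the metadata shortens each access by one level. */
static int
nb_elements_flat(const struct model *model, int i)
{
	const struct meta *metadata = &model->metadata;

	return metadata->input[i].shape.w * metadata->input[i].shape.x *
	       metadata->input[i].shape.y * metadata->input[i].shape.z;
}

int
main(void)
{
	struct model m;

	memset(&m, 0, sizeof(m));
	m.metadata.num_input = 1;
	m.metadata.input[0].shape = (struct shape){2, 3, 4, 5};

	/* Both forms compute the same element count. */
	printf("nested: %d, flat: %d\n",
	       nb_elements_nested(&m, 0), nb_elements_flat(&m, 0));
	return 0;
}

The gain is readability and shorter lines; compilers typically hoist the
repeated sub-expression on their own, so the generated code is expected to be
equivalent.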
  

Patch

diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c
index ceffde8459..2ded05c5dc 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.c
+++ b/drivers/ml/cnxk/cn10k_ml_model.c
@@ -272,8 +272,8 @@  cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_
 	addr->total_input_sz_q = 0;
 	for (i = 0; i < metadata->model.num_input; i++) {
 		addr->input[i].nb_elements =
-			model->metadata.input[i].shape.w * model->metadata.input[i].shape.x *
-			model->metadata.input[i].shape.y * model->metadata.input[i].shape.z;
+			metadata->input[i].shape.w * metadata->input[i].shape.x *
+			metadata->input[i].shape.y * metadata->input[i].shape.z;
 		addr->input[i].sz_d = addr->input[i].nb_elements *
 				      rte_ml_io_type_size_get(metadata->input[i].input_type);
 		addr->input[i].sz_q = addr->input[i].nb_elements *
@@ -360,52 +360,52 @@  cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, ui
 void
 cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model)
 {
+	struct cn10k_ml_model_metadata *metadata;
 	struct rte_ml_model_info *info;
 	struct rte_ml_io_info *output;
 	struct rte_ml_io_info *input;
 	uint8_t i;
 
+	metadata = &model->metadata;
 	info = PLT_PTR_CAST(model->info);
 	input = PLT_PTR_ADD(info, sizeof(struct rte_ml_model_info));
-	output =
-		PLT_PTR_ADD(input, model->metadata.model.num_input * sizeof(struct rte_ml_io_info));
+	output = PLT_PTR_ADD(input, metadata->model.num_input * sizeof(struct rte_ml_io_info));
 
 	/* Set model info */
 	memset(info, 0, sizeof(struct rte_ml_model_info));
-	rte_memcpy(info->name, model->metadata.model.name, MRVL_ML_MODEL_NAME_LEN);
-	snprintf(info->version, RTE_ML_STR_MAX, "%u.%u.%u.%u", model->metadata.model.version[0],
-		 model->metadata.model.version[1], model->metadata.model.version[2],
-		 model->metadata.model.version[3]);
+	rte_memcpy(info->name, metadata->model.name, MRVL_ML_MODEL_NAME_LEN);
+	snprintf(info->version, RTE_ML_STR_MAX, "%u.%u.%u.%u", metadata->model.version[0],
+		 metadata->model.version[1], metadata->model.version[2],
+		 metadata->model.version[3]);
 	info->model_id = model->model_id;
 	info->device_id = dev->data->dev_id;
 	info->batch_size = model->batch_size;
-	info->nb_inputs = model->metadata.model.num_input;
+	info->nb_inputs = metadata->model.num_input;
 	info->input_info = input;
-	info->nb_outputs = model->metadata.model.num_output;
+	info->nb_outputs = metadata->model.num_output;
 	info->output_info = output;
-	info->wb_size = model->metadata.weights_bias.file_size;
+	info->wb_size = metadata->weights_bias.file_size;
 
 	/* Set input info */
 	for (i = 0; i < info->nb_inputs; i++) {
-		rte_memcpy(input[i].name, model->metadata.input[i].input_name,
-			   MRVL_ML_INPUT_NAME_LEN);
-		input[i].dtype = model->metadata.input[i].input_type;
-		input[i].qtype = model->metadata.input[i].model_input_type;
-		input[i].shape.format = model->metadata.input[i].shape.format;
-		input[i].shape.w = model->metadata.input[i].shape.w;
-		input[i].shape.x = model->metadata.input[i].shape.x;
-		input[i].shape.y = model->metadata.input[i].shape.y;
-		input[i].shape.z = model->metadata.input[i].shape.z;
+		rte_memcpy(input[i].name, metadata->input[i].input_name, MRVL_ML_INPUT_NAME_LEN);
+		input[i].dtype = metadata->input[i].input_type;
+		input[i].qtype = metadata->input[i].model_input_type;
+		input[i].shape.format = metadata->input[i].shape.format;
+		input[i].shape.w = metadata->input[i].shape.w;
+		input[i].shape.x = metadata->input[i].shape.x;
+		input[i].shape.y = metadata->input[i].shape.y;
+		input[i].shape.z = metadata->input[i].shape.z;
 	}
 
 	/* Set output info */
 	for (i = 0; i < info->nb_outputs; i++) {
-		rte_memcpy(output[i].name, model->metadata.output[i].output_name,
+		rte_memcpy(output[i].name, metadata->output[i].output_name,
 			   MRVL_ML_OUTPUT_NAME_LEN);
-		output[i].dtype = model->metadata.output[i].output_type;
-		output[i].qtype = model->metadata.output[i].model_output_type;
+		output[i].dtype = metadata->output[i].output_type;
+		output[i].qtype = metadata->output[i].model_output_type;
 		output[i].shape.format = RTE_ML_IO_FORMAT_1D;
-		output[i].shape.w = model->metadata.output[i].size;
+		output[i].shape.w = metadata->output[i].size;
 		output[i].shape.x = 1;
 		output[i].shape.y = 1;
 		output[i].shape.z = 1;