@@ -118,8 +118,8 @@ struct fwd_stream {
unsigned int rx_packets; /**< received packets */
unsigned int tx_packets; /**< received packets transmitted */
unsigned int fwd_dropped; /**< received packets not forwarded */
- unsigned int rx_bad_ip_csum ; /**< received packets has bad ip checksum */
- unsigned int rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
+ unsigned int rx_bad_ip_csum; /**< received packets with bad IP checksum */
+ unsigned int rx_bad_l4_csum; /**< received packets with bad L4 checksum */
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t core_cycles; /**< used for RX and TX processing */
#endif
@@ -51,7 +51,7 @@
#define QAT_FIELD_SET(flags, val, bitpos, mask) \
{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
- (((val) & (mask)) << (bitpos))) ; }
+ (((val) & (mask)) << (bitpos))); }
#define QAT_FIELD_GET(flags, bitpos, mask) \
(((flags) >> (bitpos)) & (mask))
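The pair above is a standard read-modify-write on a packed bit field: QAT_FIELD_SET masks the old field contents out of flags and shifts the masked val into place, while QAT_FIELD_GET reverses the shift and mask. A minimal standalone sketch with an illustrative 4-bit field at bit position 4 (real callers pass QAT-specific positions and masks):

#include <stdio.h>
#include <stdint.h>

#define QAT_FIELD_SET(flags, val, bitpos, mask) \
{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
		(((val) & (mask)) << (bitpos))); }
#define QAT_FIELD_GET(flags, bitpos, mask) \
	(((flags) >> (bitpos)) & (mask))

int main(void)
{
	uint32_t flags = 0xFFFFFFFF;

	/* Pack 0x5 into bits [7:4]; all other bits are preserved. */
	QAT_FIELD_SET(flags, 0x5, 4, 0xF);
	/* Shift and mask the field back out: prints field = 0x5. */
	printf("field = 0x%x\n", (unsigned int)QAT_FIELD_GET(flags, 4, 0xF));
	return 0;
}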
@@ -587,7 +587,7 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
dpaa2_eventdev_process_atomic;
- for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
+ for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
ret = dpci_set_rx_queue(&dpci_dev->dpci,
CMD_PRI_LOW,
@@ -204,7 +204,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
goto aligned;
/* convert mbuf to buffers for the remainder */
- for (i = 0; i < n ; i++) {
+ for (i = 0; i < n; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
+ meta_data_size;
@@ -11535,7 +11535,8 @@ static int cut_gzip_prefix(const uint8_t * zbuf, int len)
}
/* file name is present */
if (zbuf[3] & 0x8) {
- while ((zbuf[n++] != 0) && (n < len)) ;
+ while ((zbuf[n++] != 0) && (n < len))
+ ;
}
return n;
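For context, the loop skips the NUL-terminated original-file-name field that gzip emits when the FNAME flag (bit 3 of the FLG byte, zbuf[3]) is set, leaving n at the first byte past the name. A minimal sketch of the same scan, assuming the fixed 10-byte gzip header of RFC 1952 and a hypothetical sample buffer:

#include <stdio.h>
#include <stdint.h>

static int skip_gzip_fname(const uint8_t *zbuf, int len)
{
	int n = 10; /* fixed gzip header is 10 bytes (RFC 1952) */

	/* FNAME set: advance past the NUL-terminated file name,
	 * stopping early if the buffer ends first. */
	if (zbuf[3] & 0x8) {
		while ((zbuf[n++] != 0) && (n < len))
			;
	}
	return n;
}

int main(void)
{
	/* magic, CM=deflate, FLG=FNAME, mtime, XFL, OS, then "a.txt\0" */
	const uint8_t hdr[] = { 0x1f, 0x8b, 8, 0x08, 0, 0, 0, 0, 0, 3,
				'a', '.', 't', 'x', 't', 0 };

	printf("payload starts at offset %d\n",
	       skip_gzip_fname(hdr, (int)sizeof(hdr)));
	return 0;
}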
@@ -552,7 +552,7 @@ enum elink_cos_state {
};
struct elink_ets_cos_params {
- enum elink_cos_state state ;
+ enum elink_cos_state state;
union {
struct elink_ets_bw_params bw_params;
struct elink_ets_sp_params sp_params;
@@ -203,7 +203,7 @@ int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
* enable vlan filtering and allow all vlan tags through
*/
vlanctrl = E1000_READ_REG(hw, E1000_RCTL);
- vlanctrl |= E1000_RCTL_VFE ; /* enable vlan filters */
+ vlanctrl |= E1000_RCTL_VFE; /* enable vlan filters */
E1000_WRITE_REG(hw, E1000_RCTL, vlanctrl);
/* VFTA - enable all vlan filters */
@@ -466,7 +466,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
goto err;
}
- for (i = 0 ; i < reta_size ; i++) {
+ for (i = 0; i < reta_size; i++) {
/* each reta_conf is for 64 entries.
* to support 128 we use 2 conf of 64
*/
@@ -525,7 +525,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
goto err;
}
- for (i = 0 ; i < reta_size ; i++) {
+ for (i = 0; i < reta_size; i++) {
reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
reta_idx = i % RTE_RETA_GROUP_SIZE;
if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
@@ -1774,7 +1774,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
rss_params.rss_table_size_log = 7;
vport_update_params.vport_id = 0;
/* pass the L2 handles instead of qids */
- for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
idx = qdev->rss_ind_table[i];
rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
}
@@ -364,7 +364,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq,
uint64_t index;
struct vhost_stats *pstats = &vq->stats;
- for (i = 0; i < count ; i++) {
+ for (i = 0; i < count; i++) {
pkt_len = bufs[i]->pkt_len;
if (pkt_len == 64) {
pstats->xstats[VHOST_64_PKT]++;
@@ -125,7 +125,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint16_t i;
/* Caller does the check */
- for (i = 0; i < num ; i++) {
+ for (i = 0; i < num; i++) {
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
uep = &vq->vq_ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
@@ -756,7 +756,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
hdr_size = hw->vtnet_hdr_size;
offload = rx_offload_enabled(hw);
- for (i = 0; i < num ; i++) {
+ for (i = 0; i < num; i++) {
rxm = rcv_pkts[i];
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
@@ -99,7 +99,7 @@ eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
- for (i = 0; i < num ; i ++) {
+ for (i = 0; i < num; i++) {
rxm = rx_pkts[i];
PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
rxm->next = NULL;
@@ -148,7 +148,7 @@ eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);
- for (i = 0; i < num ; i ++) {
+ for (i = 0; i < num; i++) {
/* mergeable not supported, one segment only */
rte_pktmbuf_free_seg(snd_pkts[i]);
}
@@ -402,7 +402,7 @@ grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char
if (rv == -1)
break;
- for (i = 0; i < entries_per_pg && j < pg_num ; i++) {
+ for (i = 0; i < entries_per_pg && j < pg_num; i++) {
((struct grant_node_item *)ptr)->gref = gref_arr[j];
((struct grant_node_item *)ptr)->pfn = pa_arr[j] >> pg_shift;
ptr = RTE_PTR_ADD(ptr, sizeof(struct grant_node_item));
@@ -250,7 +250,7 @@ virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_
uint16_t used_idx, desc_idx;
uint16_t i;
/* Caller does the check */
- for (i = 0; i < num ; i ++) {
+ for (i = 0; i < num; i++) {
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
uep = &vq->vq_ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
@@ -141,7 +141,7 @@ cpu_core_map_compute_eal(struct cpu_core_map *map)
core_id_contig = 0;
- for (core_id = 0; n_detected ; core_id++) {
+ for (core_id = 0; n_detected; core_id++) {
ht_id = 0;
for (lcore_id = 0;
@@ -362,7 +362,7 @@ cpu_core_map_compute_linux(struct cpu_core_map *map)
core_id_contig = 0;
- for (core_id = 0; n_detected ; core_id++) {
+ for (core_id = 0; n_detected; core_id++) {
ht_id = 0;
for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
@@ -307,7 +307,7 @@ l2fwd_malloc_shared_struct(void)
if (mapping_id == NULL)
return -1;
- for (i = 0 ;i < RTE_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
mapping_id[i] = INVALID_MAPPING_ID;
}
return 0;
@@ -485,7 +485,7 @@ rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
return -1;
/* Put those n_rx packets in the Netmap structures */
- for (i = 0; i < n_rx ; i++) {
+ for (i = 0; i < n_rx; i++) {
mbuf_to_slot(rx_mbufs[i], ring, cur_slot);
rte_pktmbuf_free(rx_mbufs[i]);
cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
@@ -1767,7 +1767,7 @@ process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
l3fwd_simple_forward(pkts_burst[j+7], portid);
}
}
- for (; j < nb_rx ; j++)
+ for (; j < nb_rx; j++)
l3fwd_simple_forward(pkts_burst[j], portid);
}
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
@@ -72,7 +72,7 @@ get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
*traffic_class = (pdata[QUEUE_OFFSET] & 0x0F) &
(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1); /* Destination IP */
*queue = ((pdata[QUEUE_OFFSET] >> 8) & 0x0F) &
- (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1) ; /* Destination IP */
+ (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1); /* Destination IP */
*color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */
return 0;
@@ -367,7 +367,7 @@ main(int argc, char **argv)
* Start pipeline_connect() on all the available slave lcores
* but the last
*/
- for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
+ for (lcore_id = 0; lcore_id < last_lcore_id; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) &&
lcore_id != master_lcore_id) {
@@ -516,9 +516,8 @@ parse_mempoolnode(struct xen_guest *guest)
err:
if (gntnode)
xen_free_gntnode(gntnode);
- for (i = 0; i < MAX_XENVIRT_MEMPOOL ; i++) {
+ for (i = 0; i < MAX_XENVIRT_MEMPOOL; i++)
cleanup_mempool(&guest->mempool[i]);
- }
/* reinitialise mempool */
bzero(&guest->mempool, MAX_XENVIRT_MEMPOOL * sizeof(guest->mempool[0]));
return -1;
@@ -257,13 +257,13 @@ find_match_scalar(struct rte_distributor *d,
* 4. Add any matches to the output
*/
- for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
+ for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
output_ptr[j] = 0;
for (i = 0; i < d->num_workers; i++) {
bl = &d->backlog[i];
- for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
+ for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
if (d->in_flight_tags[i][j] == data_ptr[w]) {
output_ptr[j] = i+1;
@@ -347,7 +347,7 @@ release(struct rte_distributor *d, unsigned int wkr)
d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
}
buf->count = i;
- for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
+ for ( ; i < RTE_DIST_BURST_SIZE; i++) {
buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
d->in_flight_tags[wkr][i] = 0;
}
@@ -381,7 +381,7 @@ rte_distributor_process_v1705(struct rte_distributor *d,
if (unlikely(num_mbufs == 0)) {
/* Flush out all non-full cache-lines to workers. */
- for (wid = 0 ; wid < d->num_workers; wid++) {
+ for (wid = 0; wid < d->num_workers; wid++) {
if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF)) {
release(d, wid);
handle_returns(d, wid);
@@ -491,7 +491,7 @@ rte_distributor_process_v1705(struct rte_distributor *d,
}
/* Flush out all non-full cache-lines to workers. */
- for (wid = 0 ; wid < d->num_workers; wid++)
+ for (wid = 0; wid < d->num_workers; wid++)
if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF))
release(d, wid);
@@ -666,7 +666,7 @@ rte_distributor_create_v1705(const char *name,
* Set up the backlog tags so they're pointing at the second cache
* line for performance during flow matching
*/
- for (i = 0 ; i < num_workers ; i++)
+ for (i = 0; i < num_workers; i++)
d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];
dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
@@ -470,7 +470,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
/* reserve a virtual area for next contiguous
* physical block: count the number of
* contiguous physical pages. */
- for (j = i+1; j < hpi->num_pages[0] ; j++) {
+ for (j = i+1; j < hpi->num_pages[0]; j++) {
#ifdef RTE_ARCH_PPC_64
/* The physical addresses are sorted in
* descending order on PPC64 */
@@ -287,7 +287,7 @@ rte_xen_dom0_memory_init(void)
memseg[memseg_idx].addr = vir_addr;
memseg[memseg_idx].phys_addr = page_size *
- seginfo[memseg_idx].pfn ;
+ seginfo[memseg_idx].pfn;
memseg[memseg_idx].len = seginfo[memseg_idx].size;
for ( i = 0; i < seginfo[memseg_idx].size / RTE_PGSIZE_2M; i++)
memseg[memseg_idx].mfn[i] = seginfo[memseg_idx].mfn[i];
@@ -1989,7 +1989,7 @@ void igb_reset(struct igb_adapter *adapter)
* Clear all flags except indication that the PF has set
* the VF MAC addresses administratively
*/
- for (i = 0 ; i < adapter->vfs_allocated_count; i++)
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
/* ping all the active vfs to let them know we are going down */
@@ -6140,7 +6140,7 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
u32 ping;
int i;
- for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
ping = E1000_PF_CONTROL_MSG;
if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
ping |= E1000_VT_MSGTYPE_CTS;
@@ -1033,7 +1033,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
/* Mark all the VFs as inactive */
- for (i = 0 ; i < adapter->num_vfs; i++)
+ for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].clear_to_send = 0;
/* ping all the active vfs to let them know we are going down */
@@ -168,7 +168,7 @@ int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
size = end - buf + 1;
}
- for (; *fmt ; ++fmt) {
+ for (; *fmt; ++fmt) {
if (*fmt != '%') {
if (str <= end)
*str = *fmt;
@@ -1670,7 +1670,7 @@ rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values,
return xcount;
if (values != NULL)
- for (i = 0 ; i < (unsigned int)xcount; i++)
+ for (i = 0; i < (unsigned int)xcount; i++)
values[i + count] = xstats[i].value;
}
@@ -132,14 +132,14 @@ find_best_rational_approximation(uint32_t alpha_num, uint32_t d_num, uint32_t de
}
/* update the interval */
- new_p_a = p_b + (x - 1) * p_a ;
+ new_p_a = p_b + (x - 1) * p_a;
new_q_a = q_b + (x - 1) * q_a;
- new_p_b = p_b + x * p_a ;
+ new_p_b = p_b + x * p_a;
new_q_b = q_b + x * q_a;
- p_a = new_p_a ;
+ p_a = new_p_a;
q_a = new_q_a;
- p_b = new_p_b ;
+ p_b = new_p_b;
q_b = new_q_b;
/* compute the number of steps to the right */
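For reference, the updates above are one batched step of a continued-fraction (Stern-Brocot) search: taking x mediant steps at once moves the interval endpoints p_a/q_a and p_b/q_b to (p_b + (x - 1) * p_a)/(q_b + (x - 1) * q_a) and (p_b + x * p_a)/(q_b + x * q_a). A naive one-mediant-at-a-time sketch of the same idea (names mirror the snippet; the helper and driver values are illustrative, not the library's API):

#include <stdio.h>
#include <stdint.h>

/* Best approximation of num/den with denominator <= q_max, found by
 * walking mediants between the endpoints 0/1 and 1/0. */
static void best_rational(uint32_t num, uint32_t den, uint32_t q_max,
			  uint32_t *p, uint32_t *q)
{
	uint32_t p_a = 0, q_a = 1; /* left endpoint 0/1 */
	uint32_t p_b = 1, q_b = 0; /* right endpoint 1/0 */
	uint64_t err_a, err_b;

	while (q_a + q_b <= q_max) {
		uint32_t p_m = p_a + p_b; /* mediant of the endpoints */
		uint32_t q_m = q_a + q_b;

		if ((uint64_t)num * q_m < (uint64_t)p_m * den) {
			p_b = p_m; /* num/den lies left of the mediant */
			q_b = q_m;
		} else {
			p_a = p_m; /* num/den lies right of (or on) it */
			q_a = q_m;
		}
	}
	/* Return the endpoint closer to num/den: the true error is
	 * |num * q - p * den| / (den * q), so den cancels and we can
	 * compare err_a * q_b against err_b * q_a. */
	err_a = (uint64_t)num * q_a > (uint64_t)p_a * den ?
		(uint64_t)num * q_a - (uint64_t)p_a * den :
		(uint64_t)p_a * den - (uint64_t)num * q_a;
	err_b = (uint64_t)num * q_b > (uint64_t)p_b * den ?
		(uint64_t)num * q_b - (uint64_t)p_b * den :
		(uint64_t)p_b * den - (uint64_t)num * q_b;
	if (err_a * q_b <= err_b * q_a) {
		*p = p_a;
		*q = q_a;
	} else {
		*p = p_b;
		*q = q_b;
	}
}

int main(void)
{
	uint32_t p, q;

	best_rational(314159, 100000, 1000, &p, &q);
	printf("314159/100000 ~= %u/%u\n", p, q); /* prints 355/113 */
	return 0;
}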
@@ -500,7 +500,8 @@ __rte_bitmap_scan_read(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab)
uint64_t *slab2;
slab2 = bmp->array2 + bmp->index2;
- for ( ; bmp->go2 ; bmp->index2 ++, slab2 ++, bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK) {
+ for ( ; bmp->go2; bmp->index2++, slab2++,
+ bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK) {
if (*slab2) {
*pos = bmp->index2 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
*slab = *slab2;
@@ -463,7 +463,7 @@ ut_setup(void)
"Failed to configure cryptodev %u",
ts_params->valid_devs[0]);
- for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs ; qp_id++) {
+ for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs; qp_id++) {
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id,
&ts_params->qp_conf,
@@ -428,7 +428,7 @@ testsuite_setup(void)
ts_params->dev_id);
ts_params->qp_conf.nb_descriptors = PERF_NUM_OPS_INFLIGHT;
- for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs ; qp_id++) {
+ for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs; qp_id++) {
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
ts_params->dev_id, qp_id,
@@ -1983,7 +1983,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(test_crypto_session, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (i = 0; i < num_to_submit ; i++) {
+ for (i = 0; i < num_to_submit; i++) {
struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
data_params[0].expected.ciphertext,
data_params[0].length, 0);
@@ -2030,7 +2030,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (i = 2; i <= 128 ; i *= 2) {
+ for (i = 2; i <= 128; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
@@ -2087,7 +2087,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (i = 0; i < num_to_submit ; i++) {
+ for (i = 0; i < num_to_submit; i++) {
rte_pktmbuf_free(c_ops[i]->sym->m_src);
rte_crypto_op_free(c_ops[i]);
}
@@ -2122,7 +2122,7 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
/* Generate Crypto op data structure(s)*/
- for (i = 0; i < num_to_submit ; i++) {
+ for (i = 0; i < num_to_submit; i++) {
struct rte_mbuf *m = test_perf_create_pktmbuf(
ts_params->mbuf_mp,
pparams->buf_size);
@@ -2159,7 +2159,7 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
printf("\nOps Tx\tOps Rx\tOps/burst ");
printf("Retries EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
- for (i = 2; i <= 128 ; i *= 2) {
+ for (i = 2; i <= 128; i *= 2) {
num_sent = 0;
num_ops_received = 0;
retries = 0;
@@ -2218,7 +2218,7 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
}
printf("\n");
- for (i = 0; i < num_to_submit ; i++) {
+ for (i = 0; i < num_to_submit; i++) {
rte_pktmbuf_free(c_ops[i]->sym->m_src);
rte_crypto_op_free(c_ops[i]);
}
@@ -2314,7 +2314,7 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
/* Generate Crypto op data structure(s)*/
- for (i = 0; i < num_to_submit ; i++) {
+ for (i = 0; i < num_to_submit; i++) {
struct rte_mbuf *m = test_perf_create_pktmbuf(
ts_params->mbuf_mp,
pparams->buf_size);
@@ -2373,7 +2373,7 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
printf("Retries EmptyPolls\tIACycles/CyOp\tIACycles/Burst\t"
"IACycles/Byte");
- for (i = 2; i <= 128 ; i *= 2) {
+ for (i = 2; i <= 128; i *= 2) {
num_sent = 0;
num_ops_received = 0;
retries = 0;
@@ -2438,7 +2438,7 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
}
printf("\n");
- for (i = 0; i < num_to_submit ; i++) {
+ for (i = 0; i < num_to_submit; i++) {
rte_pktmbuf_free(c_ops[i]->sym->m_src);
rte_crypto_op_free(c_ops[i]);
}
@@ -2479,7 +2479,7 @@ test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
sess = NULL;
/* Generate Crypto op data structure(s)*/
- for (i = 0; i < num_to_submit ; i++) {
+ for (i = 0; i < num_to_submit; i++) {
struct rte_mbuf *m = test_perf_create_pktmbuf(
ts_params->mbuf_mp,
pparams->buf_size);
@@ -2510,7 +2510,7 @@ test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
printf("Retries "
"EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
- for (i = 2; i <= 128 ; i *= 2) {
+ for (i = 2; i <= 128; i *= 2) {
num_sent = 0;
num_ops_received = 0;
retries = 0;
@@ -2574,7 +2574,7 @@ test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
}
printf("\n");
- for (i = 0; i < num_to_submit ; i++) {
+ for (i = 0; i < num_to_submit; i++) {
rte_pktmbuf_free(c_ops[i]->sym->m_src);
rte_crypto_op_free(c_ops[i]);
}
@@ -2696,7 +2696,7 @@ parallel_basic(struct test *t, int check_order)
/* Check to see if the sequence numbers are in expected order */
if (check_order) {
- for (j = 0 ; j < deq_pkts ; j++) {
+ for (j = 0; j < deq_pkts; j++) {
if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
printf(
"%d: Incorrect sequence number(%d) from port %d\n",
@@ -106,7 +106,7 @@ test_align_overlap_per_lcore(__attribute__((unused)) void *arg)
ret = -1;
break;
}
- for(j = 0; j < 1000 ; j++) {
+ for (j = 0; j < 1000; j++) {
if( *(char *)p1 != 0) {
printf("rte_zmalloc didn't zero"
"the allocated memory\n");
@@ -178,7 +178,7 @@ test_reordered_free_per_lcore(__attribute__((unused)) void *arg)
ret = -1;
break;
}
- for(j = 0; j < 1000 ; j++) {
+ for (j = 0; j < 1000; j++) {
if( *(char *)p1 != 0) {
printf("rte_zmalloc didn't zero"
"the allocated memory\n");
@@ -75,7 +75,7 @@ test_memory(void)
/* try to read memory (should not segfault) */
mem = rte_eal_get_physmem_layout();
- for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) {
+ for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL; i++) {
/* check memory */
for (j = 0; j<mem[i].len; j++) {
@@ -329,7 +329,7 @@ my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg)
{
printf("mempool name is %s\n", mp->name);
/* nothing to be implemented here*/
- return ;
+ return;
}
/*
@@ -155,9 +155,8 @@ test_ring_basic(void)
if (src == NULL)
goto fail;
- for (i = 0; i < RING_SIZE*2 ; i++) {
+ for (i = 0; i < RING_SIZE*2; i++)
src[i] = (void *)(unsigned long)i;
- }
cur_src = src;
/* alloc some room for copied objects */
@@ -358,9 +357,8 @@ test_ring_burst_basic(void)
if (src == NULL)
goto fail;
- for (i = 0; i < RING_SIZE*2 ; i++) {
+ for (i = 0; i < RING_SIZE*2; i++)
src[i] = (void *)(unsigned long)i;
- }
cur_src = src;
/* alloc some room for copied objects */
@@ -650,7 +650,7 @@ setup_acl_pipeline(void)
}
/* Enable input ports */
- for (i = 0; i < N_PORTS ; i++)
+ for (i = 0; i < N_PORTS; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
@@ -410,7 +410,7 @@ setup_pipeline(int test_type)
}
/* Enable input ports */
- for (i = 0; i < N_PORTS ; i++)
+ for (i = 0; i < N_PORTS; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);