@@ -297,7 +297,7 @@ static void
meminfo_display(void)
{
printf("----------- MEMORY_SEGMENTS -----------\n");
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
printf("--------- END_MEMORY_SEGMENTS ---------\n");
printf("------------ MEMORY_ZONES -------------\n");
@@ -390,7 +390,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
} else if (strstr(key_token, "aad")) {
rte_free(vector->aad.data);
vector->aad.data = data;
- vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
+ vector->aad.phys_addr = rte_malloc_virt2iova(vector->aad.data);
if (tc_found)
vector->aad.length = data_length;
else {
@@ -405,7 +405,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
} else if (strstr(key_token, "digest")) {
rte_free(vector->digest.data);
vector->digest.data = data;
- vector->digest.phys_addr = rte_malloc_virt2phy(
+ vector->digest.phys_addr = rte_malloc_virt2iova(
vector->digest.data);
if (tc_found)
vector->digest.length = data_length;
@@ -498,7 +498,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
t_vec->digest.phys_addr =
- rte_malloc_virt2phy(t_vec->digest.data);
+ rte_malloc_virt2iova(t_vec->digest.data);
t_vec->digest.length = options->digest_sz;
memcpy(t_vec->digest.data, digest,
options->digest_sz);
@@ -531,7 +531,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
memcpy(t_vec->aad.data, aad, options->aead_aad_sz);
- t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.phys_addr = rte_malloc_virt2iova(t_vec->aad.data);
t_vec->aad.length = options->aead_aad_sz;
} else {
t_vec->aad.data = NULL;
@@ -546,7 +546,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
t_vec->digest.phys_addr =
- rte_malloc_virt2phy(t_vec->digest.data);
+ rte_malloc_virt2iova(t_vec->digest.data);
t_vec->digest.length = options->digest_sz;
memcpy(t_vec->digest.data, digest, options->digest_sz);
t_vec->data.aead_offset = 0;
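The five crypto-perf hunks above all follow one pattern: a buffer obtained from rte_malloc() has its bus address resolved once via the renamed rte_malloc_virt2iova() and cached alongside the data pointer. A minimal sketch of that pattern, with illustrative names (alloc_dma_visible, example_buf) that are not part of the patch:

    #include <errno.h>
    #include <rte_malloc.h>
    #include <rte_memory.h>

    static int
    alloc_dma_visible(size_t len, void **vaddr, iova_addr_t *iova)
    {
        void *buf = rte_malloc("example_buf", len, 0);

        if (buf == NULL)
            return -ENOMEM;
        /* Resolve the IOVA once; the datapath reuses the cached value. */
        *iova = rte_malloc_virt2iova(buf);
        if (*iova == RTE_BAD_PHYS_ADDR) {
            rte_free(buf);
            return -EFAULT;
        }
        *vaddr = buf;
        return 0;
    }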
@@ -8039,7 +8039,7 @@ static void cmd_dump_parsed(void *parsed_result,
struct cmd_dump_result *res = parsed_result;
if (!strcmp(res->dump, "dump_physmem"))
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
else if (!strcmp(res->dump, "dump_memzone"))
rte_memzone_dump(stdout);
else if (!strcmp(res->dump, "dump_struct_sizes"))
@@ -201,7 +201,7 @@ int rte_fslmc_vfio_dmamap(void)
if (is_dma_done)
return 0;
- memseg = rte_eal_get_physmem_layout();
+ memseg = rte_eal_get_iovamem_layout();
if (memseg == NULL) {
FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
return -ENODEV;
@@ -275,7 +275,7 @@ static void *dpaa2_mem_ptov(iova_addr_t paddr) __attribute__((unused));
/* todo - this is costly, need to write a fast conversion routine */
static void *dpaa2_mem_ptov(iova_addr_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
int i;
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
@@ -290,7 +290,7 @@ static void *dpaa2_mem_ptov(iova_addr_t paddr)
static iova_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
static iova_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
int i;
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
@@ -106,7 +106,7 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
queue_name, queue_size, socket_id);
- ms = rte_eal_get_physmem_layout();
+ ms = rte_eal_get_iovamem_layout();
switch (ms[0].hugepage_sz) {
case(RTE_PGSIZE_2M):
memzone_flags = RTE_MEMZONE_2MB;
@@ -100,11 +100,11 @@ eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
iova_addr_t phys_addr_q_base;
iova_addr_t phys_addr_prod_index;
- queue_base = rte_malloc_virt2phy(queue);
+ queue_base = rte_malloc_virt2iova(queue);
phys_addr_prod_index = queue_base +
offsetof(struct ark_rx_queue, prod_index);
- phys_addr_q_base = rte_malloc_virt2phy(queue->paddress_q);
+ phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);
/* Verify HW */
if (ark_mpu_verify(queue->mpu, sizeof(iova_addr_t))) {
@@ -318,8 +318,8 @@ eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
return -1;
- queue_base = rte_malloc_virt2phy(queue);
- ring_base = rte_malloc_virt2phy(queue->meta_q);
+ queue_base = rte_malloc_virt2iova(queue);
+ ring_base = rte_malloc_virt2iova(queue->meta_q);
cons_index_addr =
queue_base + offsetof(struct ark_tx_queue, cons_index);
@@ -1679,8 +1679,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
@@ -1714,8 +1714,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
@@ -270,7 +270,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
req.vlan_tag_tbl_addr = rte_cpu_to_le_16(
- rte_mem_virt2phy(vlan_table));
+ rte_mem_virt2iova(vlan_table));
req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
}
req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
@@ -311,7 +311,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
- rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+ rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -612,7 +612,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
}
rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
"Unable to map response buffer to physical memory.\n");
@@ -638,7 +638,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
}
rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
- rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+ rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
RTE_LOG(ERR, PMD,
@@ -1683,7 +1683,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map response address to physical memory\n");
@@ -2489,7 +2489,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr[0] =
- rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
RTE_LOG(ERR, PMD,
"unable to map buffer address to physical memory\n");
@@ -2861,7 +2861,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
- req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == 0) {
RTE_LOG(ERR, PMD,
@@ -177,10 +177,10 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
+ "Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map ring address to physical memory\n");
@@ -196,8 +196,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map vnic address to physical memory\n");
@@ -1289,7 +1289,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1416,7 +1416,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
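This and the following ring-setup hunks all translate the memzone's physical address through the renamed rte_mem_phy2iova(), which keeps the memseg id in the call so Xen dom0 machine-address translation still applies. A hedged sketch of the pattern, with the validation some PMDs (e.g. sfc below) add; ring_iova is an illustrative name:

    /* Translate and validate before programming the device. */
    iova_addr_t ring_iova = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);

    if (ring_iova == RTE_BAD_PHYS_ADDR)
        return -EFAULT;
    rxq->rx_ring_phys_addr = ring_iova;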
@@ -1530,7 +1530,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -1667,7 +1667,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
/* Allocate software ring. */
@@ -1887,7 +1887,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
return -ENOMEM;
}
q->hw_ring = mz->addr;
- q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ q->hw_ring_phys_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
/* Check if number of descs satisfied Vector requirement */
if (!rte_is_power_of_2(nb_desc)) {
@@ -2047,7 +2047,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
return -ENOMEM;
}
q->hw_ring = mz->addr;
- q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ q->hw_ring_phys_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
/*
* allocate memory for the RS bit tracker. Enough slots to hold the
@@ -3741,7 +3741,7 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
mem->size = size;
mem->va = mz->addr;
- mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ mem->pa = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
mem->zone = (const void *)mz;
PMD_DRV_LOG(DEBUG,
"memzone %s allocated with physical address: %"PRIu64,
@@ -249,7 +249,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
goto fail_mem;
}
pf->fdir.prg_pkt = mz->addr;
- pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ pf->fdir.dma_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
@@ -1822,7 +1822,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
@@ -2159,7 +2159,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
@@ -2675,7 +2675,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
@@ -2731,7 +2731,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
@@ -2548,7 +2548,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -2850,7 +2850,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
}
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
@@ -1790,7 +1790,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
m = m->next;
}
- phyaddr = rte_mem_virt2phy(g->sg);
+ phyaddr = rte_mem_virt2iova(g->sg);
if (phyaddr == RTE_BAD_PHYS_ADDR) {
PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
goto xmit_failed;
@@ -1206,7 +1206,7 @@ static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
static struct ibv_mr *
mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
uintptr_t start;
uintptr_t end;
unsigned int i;
@@ -131,7 +131,7 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
struct ibv_mr *
mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
uintptr_t start;
uintptr_t end;
unsigned int i;
@@ -61,7 +61,7 @@ sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
return ENOMEM;
}
- esmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ esmp->esm_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
(void)rte_memzone_free(mz);
return EFAULT;
@@ -155,7 +155,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
header_len);
tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
- header_paddr = rte_malloc_virt2phy((void *)tsoh);
+ header_paddr = rte_malloc_virt2iova((void *)tsoh);
} else {
if (m->data_len == header_len) {
*in_off = 0;
@@ -2461,7 +2461,7 @@ reserve_key_memory(struct l2fwd_crypto_options *options)
options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
if (options->aad.data == NULL)
rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
- options->aad.phys_addr = rte_malloc_virt2phy(options->aad.data);
+ options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);
}
int
@@ -1271,7 +1271,7 @@ rte_crypto_op_init(struct rte_mempool *mempool,
__rte_crypto_op_reset(op, type);
- op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->phys_addr = rte_mem_virt2iova(_op_data);
op->mempool = mempool;
}
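rte_crypto_op_init() is a mempool object constructor, so this translation runs once per object at pool-population time and never on the datapath. A hedged sketch of the same idea for a hypothetical user type (struct my_obj and my_obj_init are illustrative; the signature matches rte_mempool_obj_cb_t):

    #include <rte_common.h>
    #include <rte_memory.h>
    #include <rte_mempool.h>

    struct my_obj {
        iova_addr_t iova;   /* bus address cached at init time */
    };

    static void
    my_obj_init(struct rte_mempool *mp, void *opaque,
                void *obj, unsigned obj_idx)
    {
        struct my_obj *o = obj;

        RTE_SET_USED(mp);
        RTE_SET_USED(opaque);
        RTE_SET_USED(obj_idx);
        o->iova = rte_mem_virt2iova(obj);
    }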
@@ -441,7 +441,7 @@ eal_check_mem_on_local_socket(void)
socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
- ms = rte_eal_get_physmem_layout();
+ ms = rte_eal_get_iovamem_layout();
for (i = 0; i < RTE_MAX_MEMSEG; i++)
if (ms[i].socket_id == socket_id &&
@@ -51,7 +51,7 @@
* Get physical address of any mapped virtual address in the current process.
*/
iova_addr_t
-rte_mem_virt2phy(const void *virtaddr)
+rte_mem_virt2iova(const void *virtaddr)
{
/* XXX not implemented. This function is only used by
* rte_mempool_virt2phy() when hugepages are disabled. */
@@ -14,7 +14,7 @@ DPDK_2.0 {
rte_cpu_get_flag_enabled;
rte_cycles_vmware_tsc_map;
rte_delay_us;
- rte_dump_physmem_layout;
+ rte_dump_iovamem_layout;
rte_dump_registers;
rte_dump_stack;
rte_dump_tailq;
@@ -25,8 +25,8 @@ DPDK_2.0 {
rte_eal_devargs_type_count;
rte_eal_get_configuration;
rte_eal_get_lcore_state;
- rte_eal_get_physmem_layout;
- rte_eal_get_physmem_size;
+ rte_eal_get_iovamem_layout;
+ rte_eal_get_iovamem_size;
rte_eal_has_hugepages;
rte_eal_hpet_init;
rte_eal_init;
@@ -62,10 +62,10 @@ DPDK_2.0 {
rte_malloc_set_limit;
rte_malloc_socket;
rte_malloc_validate;
- rte_malloc_virt2phy;
+ rte_malloc_virt2iova;
rte_mem_lock_page;
- rte_mem_phy2mch;
- rte_mem_virt2phy;
+ rte_mem_phy2iova;
+ rte_mem_virt2iova;
rte_memdump;
rte_memory_get_nchannel;
rte_memory_get_nrank;
@@ -55,7 +55,7 @@
* memory. The last element of the table contains a NULL address.
*/
const struct rte_memseg *
-rte_eal_get_physmem_layout(void)
+rte_eal_get_iovamem_layout(void)
{
return rte_eal_get_configuration()->mem_config->memseg;
}
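Callers throughout this patch treat the returned table as terminated by the first entry with a NULL address (or zero addr_64). A small usage sketch consistent with those call sites; the printf fields are illustrative:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_memory.h>

    const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
    unsigned i;

    for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++)
        printf("seg %u: va=%p iova=0x%" PRIx64 " len=%zu\n",
               i, ms[i].addr, (uint64_t)ms[i].phys_addr, ms[i].len);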
@@ -63,7 +63,7 @@ rte_eal_get_physmem_layout(void)
/* get the total size of memory */
uint64_t
-rte_eal_get_physmem_size(void)
+rte_eal_get_iovamem_size(void)
{
const struct rte_mem_config *mcfg;
unsigned i = 0;
@@ -84,7 +84,7 @@ rte_eal_get_physmem_size(void)
/* Dump the physical memory layout on console */
void
-rte_dump_physmem_layout(FILE *f)
+rte_dump_iovamem_layout(FILE *f)
{
const struct rte_mem_config *mcfg;
unsigned i = 0;
@@ -251,7 +251,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
mcfg->memzone_cnt++;
snprintf(mz->name, sizeof(mz->name), "%s", name);
- mz->phys_addr = rte_malloc_virt2phy(mz_addr);
+ mz->phys_addr = rte_malloc_virt2iova(mz_addr);
mz->addr = mz_addr;
mz->len = (requested_len == 0 ? elem->size : requested_len);
mz->hugepage_sz = elem->ms->hugepage_sz;
@@ -419,7 +419,7 @@ rte_eal_memzone_init(void)
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
- memseg = rte_eal_get_physmem_layout();
+ memseg = rte_eal_get_iovamem_layout();
if (memseg == NULL) {
RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
return -1;
@@ -341,7 +341,7 @@ int rte_eal_hugepage_attach(void);
* addresses are obtainable. It is only possible to get
* physical addresses when running as a privileged user.
*/
-bool rte_eal_using_phys_addrs(void);
+bool rte_eal_using_iova_addrs(void);
/**
* Find a bus capable of identifying a device.
@@ -333,7 +333,7 @@ rte_malloc_set_limit(const char *type, size_t max);
* otherwise return physical address of the buffer
*/
iova_addr_t
-rte_malloc_virt2phy(const void *addr);
+rte_malloc_virt2iova(const void *addr);
#ifdef __cplusplus
}
@@ -142,7 +142,7 @@ int rte_mem_lock_page(const void *virt);
* @return
* The physical address or RTE_BAD_PHYS_ADDR on error.
*/
-iova_addr_t rte_mem_virt2phy(const void *virt);
+iova_addr_t rte_mem_virt2iova(const void *virt);
/**
* Get the layout of the available physical memory.
@@ -159,7 +159,7 @@ iova_addr_t rte_mem_virt2phy(const void *virt);
* - On error, return NULL. This should not happen since it is a fatal
* error that will probably cause the entire system to panic.
*/
-const struct rte_memseg *rte_eal_get_physmem_layout(void);
+const struct rte_memseg *rte_eal_get_iovamem_layout(void);
/**
* Dump the physical memory layout to a file.
@@ -167,7 +167,7 @@ const struct rte_memseg *rte_eal_get_physmem_layout(void);
* @param f
* A pointer to a file for output
*/
-void rte_dump_physmem_layout(FILE *f);
+void rte_dump_iovamem_layout(FILE *f);
/**
* Get the total amount of available physical memory.
@@ -175,7 +175,7 @@ void rte_dump_physmem_layout(FILE *f);
* @return
* The total amount of available physical memory in bytes.
*/
-uint64_t rte_eal_get_physmem_size(void);
+uint64_t rte_eal_get_iovamem_size(void);
/**
* Get the number of memory channels.
@@ -216,7 +216,7 @@ iova_addr_t rte_xen_mem_phy2mch(int32_t, const iova_addr_t);
* The physical address or RTE_BAD_PHYS_ADDR on error.
*/
static inline iova_addr_t
-rte_mem_phy2mch(int32_t memseg_id, const iova_addr_t phy_addr)
+rte_mem_phy2iova(int32_t memseg_id, const iova_addr_t phy_addr)
{
if (rte_xen_dom0_supported())
return rte_xen_mem_phy2mch(memseg_id, phy_addr);
@@ -252,7 +252,7 @@ static inline int rte_xen_dom0_supported(void)
}
static inline iova_addr_t
-rte_mem_phy2mch(int32_t memseg_id __rte_unused, const iova_addr_t phy_addr)
+rte_mem_phy2iova(int32_t memseg_id __rte_unused, const iova_addr_t phy_addr)
{
return phy_addr;
}
@@ -249,7 +249,7 @@ rte_malloc_set_limit(__rte_unused const char *type,
* Return the physical address of a virtual address obtained through rte_malloc
*/
iova_addr_t
-rte_malloc_virt2phy(const void *addr)
+rte_malloc_virt2iova(const void *addr)
{
const struct malloc_elem *elem = malloc_elem_from_data(addr);
if (elem == NULL)
@@ -671,7 +671,7 @@ eal_check_mem_on_local_socket(void)
socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
- ms = rte_eal_get_physmem_layout();
+ ms = rte_eal_get_iovamem_layout();
for (i = 0; i < RTE_MAX_MEMSEG; i++)
if (ms[i].socket_id == socket_id &&
@@ -117,7 +117,7 @@ test_phys_addrs_available(void)
return;
}
- physaddr = rte_mem_virt2phy(&tmp);
+ physaddr = rte_mem_virt2iova(&tmp);
if (physaddr == RTE_BAD_PHYS_ADDR) {
RTE_LOG(ERR, EAL,
"Cannot obtain physical addresses: %s. "
@@ -131,7 +131,7 @@ test_phys_addrs_available(void)
* Get physical address of any mapped virtual address in the current process.
*/
iova_addr_t
-rte_mem_virt2phy(const void *virtaddr)
+rte_mem_virt2iova(const void *virtaddr)
{
int fd, retval;
uint64_t page, physaddr;
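On Linux this function resolves the address through /proc/self/pagemap. A simplified, hedged sketch of that mechanism (pagemap entries are 8 bytes; per the kernel documentation, bits 0-54 hold the PFN and bit 63 the present flag); the real implementation adds finer-grained error reporting:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static uint64_t
    pagemap_virt2phys(const void *va)
    {
        long pgsz = sysconf(_SC_PAGESIZE);
        off_t off = ((uintptr_t)va / pgsz) * sizeof(uint64_t);
        uint64_t entry;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
            return UINT64_MAX;
        if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
            close(fd);
            return UINT64_MAX;
        }
        close(fd);
        if (!(entry & (1ULL << 63)))    /* page not present */
            return UINT64_MAX;
        return (entry & ((1ULL << 55) - 1)) * pgsz
               + ((uintptr_t)va % pgsz);
    }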
@@ -222,7 +222,7 @@ find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
iova_addr_t addr;
for (i = 0; i < hpi->num_pages[0]; i++) {
- addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
+ addr = rte_mem_virt2iova(hugepg_tbl[i].orig_va);
if (addr == RTE_BAD_PHYS_ADDR)
return -1;
hugepg_tbl[i].physaddr = addr;
@@ -1543,7 +1543,7 @@ rte_eal_hugepage_attach(void)
}
bool
-rte_eal_using_phys_addrs(void)
+rte_eal_using_iova_addrs(void)
{
return phys_addrs_available;
}
@@ -102,7 +102,7 @@ rte_pci_map_device(struct rte_pci_device *dev)
break;
case RTE_KDRV_IGB_UIO:
case RTE_KDRV_UIO_GENERIC:
- if (rte_eal_using_phys_addrs()) {
+ if (rte_eal_using_iova_addrs()) {
/* map resources for devices that use uio */
ret = pci_uio_map_resource(dev);
}
@@ -144,7 +144,7 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
void *
pci_find_max_end_va(void)
{
- const struct rte_memseg *seg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *seg = rte_eal_get_iovamem_layout();
const struct rte_memseg *last = seg;
unsigned i = 0;
@@ -692,7 +692,7 @@ vfio_get_group_no(const char *sysfs_base,
static int
vfio_type1_dma_map(int vfio_container_fd)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
int i, ret;
/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
@@ -725,7 +725,7 @@ vfio_type1_dma_map(int vfio_container_fd)
static int
vfio_spapr_dma_map(int vfio_container_fd)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
int i, ret;
struct vfio_iommu_spapr_register_memory reg = {
@@ -760,7 +760,7 @@ vfio_spapr_dma_map(int vfio_container_fd)
}
/* calculate window size based on number of hugepages configured */
- create.window_size = rte_eal_get_physmem_size();
+ create.window_size = rte_eal_get_iovamem_size();
create.page_shift = __builtin_ctzll(ms->hugepage_sz);
create.levels = 2;
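page_shift here depends on hugepage sizes being powers of two, so counting trailing zero bits yields exactly log2 of the page size. A one-function illustration (page_shift_of is a made-up name):

    #include <stdint.h>

    static unsigned
    page_shift_of(uint64_t hugepage_sz)
    {
        /* RTE_PGSIZE_2M (1 << 21) -> 21; RTE_PGSIZE_1G (1 << 30) -> 30 */
        return __builtin_ctzll(hugepage_sz);
    }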
@@ -14,7 +14,7 @@ DPDK_2.0 {
rte_cpu_get_flag_enabled;
rte_cycles_vmware_tsc_map;
rte_delay_us;
- rte_dump_physmem_layout;
+ rte_dump_iovamem_layout;
rte_dump_registers;
rte_dump_stack;
rte_dump_tailq;
@@ -25,8 +25,8 @@ DPDK_2.0 {
rte_eal_devargs_type_count;
rte_eal_get_configuration;
rte_eal_get_lcore_state;
- rte_eal_get_physmem_layout;
- rte_eal_get_physmem_size;
+ rte_eal_get_iovamem_layout;
+ rte_eal_get_iovamem_size;
rte_eal_has_hugepages;
rte_eal_hpet_init;
rte_eal_init;
@@ -62,10 +62,10 @@ DPDK_2.0 {
rte_malloc_set_limit;
rte_malloc_socket;
rte_malloc_validate;
- rte_malloc_virt2phy;
+ rte_malloc_virt2iova;
rte_mem_lock_page;
- rte_mem_phy2mch;
- rte_mem_virt2phy;
+ rte_mem_phy2iova;
+ rte_mem_virt2iova;
rte_memdump;
rte_memory_get_nchannel;
rte_memory_get_nrank;
@@ -344,7 +344,7 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
* on error.
*/
int
-rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
iova_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
@@ -408,7 +408,7 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
* number of objects added, or a negative value on error.
*/
int
-rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
const iova_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
{
@@ -421,7 +421,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
return -EEXIST;
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- return rte_mempool_populate_phys(mp, vaddr, RTE_BAD_PHYS_ADDR,
+ return rte_mempool_populate_iova(mp, vaddr, RTE_BAD_PHYS_ADDR,
pg_num * pg_sz, free_cb, opaque);
for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
@@ -431,7 +431,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
;
- ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
+ ret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz,
paddr[i], n * pg_sz, free_cb, opaque);
if (ret < 0) {
rte_mempool_free_memchunks(mp);
@@ -466,15 +466,15 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
return -EINVAL;
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- return rte_mempool_populate_phys(mp, addr, RTE_BAD_PHYS_ADDR,
+ return rte_mempool_populate_iova(mp, addr, RTE_BAD_PHYS_ADDR,
len, free_cb, opaque);
for (off = 0; off + pg_sz <= len &&
mp->populated_size < mp->size; off += phys_len) {
- paddr = rte_mem_virt2phy(addr + off);
+ paddr = rte_mem_virt2iova(addr + off);
/* required for xen_dom0 to get the machine address */
- paddr = rte_mem_phy2mch(-1, paddr);
+ paddr = rte_mem_phy2iova(-1, paddr);
if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
ret = -EINVAL;
@@ -485,14 +485,14 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
iova_addr_t paddr_tmp;
- paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
- paddr_tmp = rte_mem_phy2mch(-1, paddr_tmp);
+ paddr_tmp = rte_mem_virt2iova(addr + off + phys_len);
+ paddr_tmp = rte_mem_phy2iova(-1, paddr_tmp);
if (paddr_tmp != paddr + phys_len)
break;
}
- ret = rte_mempool_populate_phys(mp, addr + off, paddr,
+ ret = rte_mempool_populate_iova(mp, addr + off, paddr,
phys_len, free_cb, opaque);
if (ret < 0)
goto fail;
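The inner loop above extends phys_len while successive pages stay IOVA-contiguous, so each rte_mempool_populate_iova() call covers one maximal contiguous run. A worked example as a comment, with hypothetical 2 MB pages:

    /* Suppose pg_sz = 0x200000 and the pages translate to:
     *   page 0 -> 0x1000000, page 1 -> 0x1200000,
     *   page 2 -> 0x1400000, page 3 -> 0x9000000
     * Pages 0-2 are contiguous, so phys_len grows to 6 MB and
     * rte_mempool_populate_iova() is called twice: once for the
     * 6 MB run, once for the lone 2 MB page. */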
@@ -569,7 +569,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
paddr = mz->phys_addr;
if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
- ret = rte_mempool_populate_phys(mp, mz->addr,
+ ret = rte_mempool_populate_iova(mp, mz->addr,
paddr, mz->len,
rte_mempool_memchunk_mz_free,
(void *)(uintptr_t)mz);
@@ -954,7 +954,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
if (mp_init)
mp_init(mp, mp_init_arg);
- ret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift,
+ ret = rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift,
NULL, NULL);
if (ret < 0 || ret != (int)mp->size)
goto fail;
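rte_mempool_xmem_create() is the main in-tree caller of the renamed rte_mempool_populate_iova_tab(): it hands over a virtual area plus one IOVA per page. A hedged calling sketch; vaddr, paddr, pg_num and pg_shift are illustrative, and pg_shift is log2 of the page size:

    int ret = rte_mempool_populate_iova_tab(mp, vaddr, paddr,
                                            pg_num, pg_shift, NULL, NULL);

    if (ret < 0 || ret != (int)mp->size)
        return ret < 0 ? ret : -ENOMEM;  /* pool only partially populated */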
@@ -819,7 +819,7 @@ rte_mempool_free(struct rte_mempool *mp);
* On error, the chunk is not added in the memory list of the
* mempool and a negative errno is returned.
*/
-int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
iova_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque);
@@ -850,7 +850,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
* On error, the chunks are not added in the memory list of the
* mempool and a negative errno is returned.
*/
-int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
const iova_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
@@ -34,8 +34,8 @@ DPDK_16.07 {
rte_mempool_ops_table;
rte_mempool_populate_anon;
rte_mempool_populate_default;
- rte_mempool_populate_phys;
- rte_mempool_populate_phys_tab;
+ rte_mempool_populate_iova;
+ rte_mempool_populate_iova_tab;
rte_mempool_populate_virt;
rte_mempool_register_ops;
rte_mempool_set_ops_byname;
@@ -453,7 +453,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
uint64_t host_phys_addr;
uint64_t size;
- host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);
+ host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
size = page_size - (guest_phys_addr & (page_size - 1));
size = RTE_MIN(size, reg_size);
@@ -464,7 +464,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
while (reg_size > 0) {
size = RTE_MIN(reg_size, page_size);
- host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
+ host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
host_user_addr);
add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
@@ -147,7 +147,7 @@ static void cmd_dump_parsed(void *parsed_result,
struct cmd_dump_result *res = parsed_result;
if (!strcmp(res->dump, "dump_physmem"))
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
else if (!strcmp(res->dump, "dump_memzone"))
rte_memzone_dump(stdout);
else if (!strcmp(res->dump, "dump_struct_sizes"))
@@ -741,7 +741,7 @@ test_malloc_bad_params(void)
static int
is_mem_on_socket(int32_t socket)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
unsigned i;
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
@@ -758,7 +758,7 @@ is_mem_on_socket(int32_t socket)
static int32_t
addr_to_socket(void * addr)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
unsigned i;
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
@@ -64,17 +64,17 @@ test_memory(void)
* that at least one line is dumped
*/
printf("Dump memory layout\n");
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
/* check that memory size is != 0 */
- s = rte_eal_get_physmem_size();
+ s = rte_eal_get_iovamem_size();
if (s == 0) {
printf("No memory detected\n");
return -1;
}
/* try to read memory (should not segfault) */
- mem = rte_eal_get_physmem_layout();
+ mem = rte_eal_get_iovamem_layout();
for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) {
/* check memory */
@@ -145,9 +145,9 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
GOTO_ERR(ret, out);
-#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */
printf("get physical address of an object\n");
- if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+ if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2iova(obj))
GOTO_ERR(ret, out);
#endif
@@ -139,7 +139,7 @@ test_memzone_reserve_flags(void)
int hugepage_16GB_avail = 0;
const size_t size = 100;
int i = 0;
- ms = rte_eal_get_physmem_layout();
+ ms = rte_eal_get_iovamem_layout();
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
hugepage_2MB_avail = 1;
@@ -422,7 +422,7 @@ test_memzone_reserve_max(void)
if (mz == NULL){
printf("Failed to reserve a big chunk of memory - %s\n",
rte_strerror(rte_errno));
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
rte_memzone_dump(stdout);
return -1;
}
@@ -430,7 +430,7 @@ test_memzone_reserve_max(void)
if (mz->len != maxlen) {
printf("Memzone reserve with 0 size did not return bigest block\n");
printf("Expected size = %zu, actual size = %zu\n", maxlen, mz->len);
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
rte_memzone_dump(stdout);
return -1;
}
@@ -459,7 +459,7 @@ test_memzone_reserve_max_aligned(void)
if (mz == NULL){
printf("Failed to reserve a big chunk of memory - %s\n",
rte_strerror(rte_errno));
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
rte_memzone_dump(stdout);
return -1;
}
@@ -469,7 +469,7 @@ test_memzone_reserve_max_aligned(void)
" bigest block\n", align);
printf("Expected size = %zu, actual size = %zu\n",
maxlen, mz->len);
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
rte_memzone_dump(stdout);
return -1;
}