[dpdk-dev] [PATCH v3 12/14] Add eal memory support for IBM Power Architecture

David Marchand david.marchand at 6wind.com
Mon Nov 24 16:17:48 CET 2014


Chao,

I think there are two remaining issues, in
lib/librte_eal/linuxapp/eal/eal.c
and lib/librte_eal/linuxapp/eal/eal_hugepage_info.c.
I will send a patch in reply to this one; I think it can be integrated
into your patchset.

Thanks.

-- 
David Marchand

On Mon, Nov 24, 2014 at 2:22 AM, Chao Zhu <chaozhu at linux.vnet.ibm.com> wrote:

> On IBM Power, the mmap of hugepage files proceeds from high addresses to
> low addresses, the opposite of x86. This patch modifies the memory
> segment detection code to get the correct memory segment layout on Power
> architecture. It also adds a common RTE_ARCH_64 definition for 64-bit
> systems.
>
> Signed-off-by: Chao Zhu <chaozhu at linux.vnet.ibm.com>
> ---
>  config/defconfig_ppc_64-power8-linuxapp-gcc   |    1 +
>  config/defconfig_x86_64-native-linuxapp-clang |    1 +
>  config/defconfig_x86_64-native-linuxapp-gcc   |    1 +
>  config/defconfig_x86_64-native-linuxapp-icc   |    1 +
>  lib/librte_eal/linuxapp/eal/eal_memory.c      |   75 ++++++++++++++++++-------
>  5 files changed, 59 insertions(+), 20 deletions(-)
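
For context, the crux of the change can be stated in a few lines: the
hugepage table is sorted by ascending physical address on x86 but by
descending physical address on PPC64, so the contiguity test has to
subtract the hugepage size instead of adding it. Below is a minimal
standalone sketch of that check (not code from the patch;
contiguous_pages, physaddr and PAGE_SZ are hypothetical names):

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SZ (16ULL * 1024 * 1024) /* e.g. one 16MB hugepage */

    /*
     * Count how many pages starting at index i form one physically
     * contiguous block. On x86 the table is sorted ascending, so the
     * next page sits at prev + PAGE_SZ; on PPC64 it is sorted
     * descending, so the next page sits at prev - PAGE_SZ.
     */
    static size_t
    contiguous_pages(const uint64_t *physaddr, size_t n, size_t i)
    {
        size_t j;

        for (j = i + 1; j < n; j++) {
    #ifdef RTE_ARCH_PPC_64
            if (physaddr[j] != physaddr[j - 1] - PAGE_SZ)
                break;
    #else
            if (physaddr[j] != physaddr[j - 1] + PAGE_SZ)
                break;
    #endif
        }
        return j - i;
    }

This is the same test the patch adds in map_all_hugepages() and
remap_all_hugepages(), just stripped of the hugepage_file bookkeeping.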
>
> diff --git a/config/defconfig_ppc_64-power8-linuxapp-gcc b/config/defconfig_ppc_64-power8-linuxapp-gcc
> index b10f60c..23a5591 100644
> --- a/config/defconfig_ppc_64-power8-linuxapp-gcc
> +++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
> @@ -35,6 +35,7 @@ CONFIG_RTE_MACHINE="power8"
>  CONFIG_RTE_ARCH="ppc_64"
>  CONFIG_RTE_ARCH_PPC_64=y
>  CONFIG_RTE_ARCH_BIG_ENDIAN=y
> +CONFIG_RTE_ARCH_64=y
>
>  CONFIG_RTE_TOOLCHAIN="gcc"
>  CONFIG_RTE_TOOLCHAIN_GCC=y
> diff --git a/config/defconfig_x86_64-native-linuxapp-clang b/config/defconfig_x86_64-native-linuxapp-clang
> index bbda080..5f3074e 100644
> --- a/config/defconfig_x86_64-native-linuxapp-clang
> +++ b/config/defconfig_x86_64-native-linuxapp-clang
> @@ -36,6 +36,7 @@ CONFIG_RTE_MACHINE="native"
>
>  CONFIG_RTE_ARCH="x86_64"
>  CONFIG_RTE_ARCH_X86_64=y
> +CONFIG_RTE_ARCH_64=y
>
>  CONFIG_RTE_TOOLCHAIN="clang"
>  CONFIG_RTE_TOOLCHAIN_CLANG=y
> diff --git a/config/defconfig_x86_64-native-linuxapp-gcc b/config/defconfig_x86_64-native-linuxapp-gcc
> index 3de818a..60baf5b 100644
> --- a/config/defconfig_x86_64-native-linuxapp-gcc
> +++ b/config/defconfig_x86_64-native-linuxapp-gcc
> @@ -36,6 +36,7 @@ CONFIG_RTE_MACHINE="native"
>
>  CONFIG_RTE_ARCH="x86_64"
>  CONFIG_RTE_ARCH_X86_64=y
> +CONFIG_RTE_ARCH_64=y
>
>  CONFIG_RTE_TOOLCHAIN="gcc"
>  CONFIG_RTE_TOOLCHAIN_GCC=y
> diff --git a/config/defconfig_x86_64-native-linuxapp-icc b/config/defconfig_x86_64-native-linuxapp-icc
> index 795333b..71d1e28 100644
> --- a/config/defconfig_x86_64-native-linuxapp-icc
> +++ b/config/defconfig_x86_64-native-linuxapp-icc
> @@ -36,6 +36,7 @@ CONFIG_RTE_MACHINE="native"
>
>  CONFIG_RTE_ARCH="x86_64"
>  CONFIG_RTE_ARCH_X86_64=y
> +CONFIG_RTE_ARCH_64=y
>
>  CONFIG_RTE_TOOLCHAIN="icc"
>  CONFIG_RTE_TOOLCHAIN_ICC=y
> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
> index f2454f4..a8e7421 100644
> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
> @@ -316,11 +316,11 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  #endif
>
>                         hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
>                 }
> -#ifndef RTE_ARCH_X86_64
> -               /* for 32-bit systems, don't remap 1G pages, just reuse original
> +#ifndef RTE_ARCH_64
> +               /* for 32-bit systems, don't remap 1G and 16G pages, just reuse original
>                  * map address as final map address.
>                  */
> -               else if (hugepage_sz == RTE_PGSIZE_1G){
> +               else if ((hugepage_sz == RTE_PGSIZE_1G) || (hugepage_sz == RTE_PGSIZE_16G)){
>                         hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
>                         hugepg_tbl[i].orig_va = NULL;
>                         continue;
> @@ -335,9 +335,16 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>                          * physical block: count the number of
>                          * contiguous physical pages. */
>                         for (j = i+1; j < hpi->num_pages[0] ; j++) {
> +#ifdef RTE_ARCH_PPC_64
> +                               /* The physical addresses are sorted in descending order on PPC64 */
> +                               if (hugepg_tbl[j].physaddr !=
> +                                   hugepg_tbl[j-1].physaddr - hugepage_sz)
> +                                       break;
> +#else
>                                 if (hugepg_tbl[j].physaddr !=
>                                     hugepg_tbl[j-1].physaddr + hugepage_sz)
>                                         break;
> +#endif
>                         }
>                         num_pages = j - i;
>                         vma_len = num_pages * hugepage_sz;
> @@ -412,11 +419,11 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>
>         while (i < hpi->num_pages[0]) {
>
> -#ifndef RTE_ARCH_X86_64
> -               /* for 32-bit systems, don't remap 1G pages, just reuse original
> +#ifndef RTE_ARCH_64
> +               /* for 32-bit systems, don't remap 1G and 16G pages, just reuse original
>                  * map address as final map address.
>                  */
> -               if (hugepage_sz == RTE_PGSIZE_1G){
> +               if ((hugepage_sz == RTE_PGSIZE_1G) || (hugepage_sz == RTE_PGSIZE_16G)){
>                         hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
>                         hugepg_tbl[i].orig_va = NULL;
>                         i++;
> @@ -428,9 +435,15 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>                  * physical block: count the number of
>                  * contiguous physical pages. */
>                 for (j = i+1; j < hpi->num_pages[0] ; j++) {
> +#ifdef RTE_ARCH_PPC_64
> +                       /* The physical addresses are sorted in descending order on PPC64 */
> +                       if (hugepg_tbl[j].physaddr != hugepg_tbl[j-1].physaddr - hugepage_sz)
> +                               break;
> +#else
>                         if (hugepg_tbl[j].physaddr != hugepg_tbl[j-1].physaddr + hugepage_sz)
>                                 break;
> -               }
> +#endif
> +               }
>                 num_pages = j - i;
>                 vma_len = num_pages * hugepage_sz;
>
> @@ -652,21 +665,21 @@ error:
>  }
>
>  /*
> - * Sort the hugepg_tbl by physical address (lower addresses first). We
> - * use a slow algorithm, but we won't have millions of pages, and this
> + * Sort the hugepg_tbl by physical address (lower addresses first on x86, higher addresses first
> + * on powerpc). We use a slow algorithm, but we won't have millions of pages, and this
>   * is only done at init time.
>   */
>  static int
>  sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>  {
>         unsigned i, j;
> -       int smallest_idx;
> -       uint64_t smallest_addr;
> +       int compare_idx;
> +       uint64_t compare_addr;
>         struct hugepage_file tmp;
>
>         for (i = 0; i < hpi->num_pages[0]; i++) {
> -               smallest_addr = 0;
> -               smallest_idx = -1;
> +               compare_addr = 0;
> +               compare_idx = -1;
>
>                 /*
>                  * browse all entries starting at 'i', and find the
> @@ -674,22 +687,26 @@ sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>                  */
>                 for (j=i; j< hpi->num_pages[0]; j++) {
>
> -                       if (smallest_addr == 0 ||
> -                           hugepg_tbl[j].physaddr < smallest_addr) {
> -                               smallest_addr = hugepg_tbl[j].physaddr;
> -                               smallest_idx = j;
> +                       if (compare_addr == 0 ||
> +#ifdef RTE_ARCH_PPC_64
> +                           hugepg_tbl[j].physaddr > compare_addr) {
> +#else
> +                           hugepg_tbl[j].physaddr < compare_addr) {
> +#endif
> +                               compare_addr = hugepg_tbl[j].physaddr;
> +                               compare_idx = j;
>                         }
>                 }
>
>                 /* should not happen */
> -               if (smallest_idx == -1) {
> +               if (compare_idx == -1) {
>                         RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
>                         return -1;
>                 }
>
>                 /* swap the 2 entries in the table */
> -               memcpy(&tmp, &hugepg_tbl[smallest_idx], sizeof(struct hugepage_file));
> -               memcpy(&hugepg_tbl[smallest_idx], &hugepg_tbl[i],
> +               memcpy(&tmp, &hugepg_tbl[compare_idx], sizeof(struct hugepage_file));
> +               memcpy(&hugepg_tbl[compare_idx], &hugepg_tbl[i],
>                                 sizeof(struct hugepage_file));
>                 memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage_file));
>         }
> @@ -1260,12 +1277,24 @@ rte_eal_hugepage_init(void)
>                         new_memseg = 1;
>                 else if (hugepage[i].size != hugepage[i-1].size)
>                         new_memseg = 1;
> +
> +#ifdef RTE_ARCH_PPC_64
> +               /* On PPC64 architecture, mmap always starts from higher virtual addresses to lower addresses.
> +                * Here, both the physical and virtual addresses are in descending order */
> +               else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
> +                   hugepage[i].size)
> +                       new_memseg = 1;
> +               else if (((unsigned long)hugepage[i-1].final_va -
> +                   (unsigned long)hugepage[i].final_va) != hugepage[i].size)
> +                       new_memseg = 1;
> +#else
>                 else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
>                     hugepage[i].size)
>                         new_memseg = 1;
>                 else if (((unsigned long)hugepage[i].final_va -
>                     (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
>                         new_memseg = 1;
> +#endif
>
>                 if (new_memseg) {
>                         j += 1;
> @@ -1284,6 +1313,12 @@ rte_eal_hugepage_init(void)
>                 }
>                 /* continuation of previous memseg */
>                 else {
> +#ifdef RTE_ARCH_PPC_64
> +                       /* Use the physical and virtual address of the last page
> +                        * as the segment address for IBM Power architecture */
> +                       mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
> +                       mcfg->memseg[j].addr = hugepage[i].final_va;
> +#endif
>                         mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
>                 }
>                 hugepage[i].memseg_id = j;
> --
> 1.7.1
>
>
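
The memseg bookkeeping at the end of the patch follows the same
descending-order logic: since both physical and virtual addresses
decrease on PPC64, each page that continues a segment becomes the new
segment base while the length grows by one hugepage. A minimal sketch,
with a hypothetical seg struct standing in for the memseg fields the
patch touches:

    #include <stdint.h>

    struct seg {
        uint64_t phys_addr;   /* physical base of the segment */
        void *addr;           /* virtual base of the segment */
        uint64_t len;
        uint64_t hugepage_sz;
    };

    /* Extend segment s with one more page at physical pa / virtual va. */
    static void
    extend_segment(struct seg *s, uint64_t pa, void *va)
    {
    #ifdef RTE_ARCH_PPC_64
        /*
         * Pages arrive in descending address order, so the newest page
         * is the lowest one: it becomes the new segment base.
         */
        s->phys_addr = pa;
        s->addr = va;
    #else
        (void)pa;
        (void)va;
    #endif
        /* In both layouts the segment now covers one more hugepage. */
        s->len += s->hugepage_sz;
    }

Keeping the base pointed at the lowest page means the segment still
describes one contiguous [addr, addr + len) range on both layouts.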

