[dpdk-dev] arch/arm: optimization for memcpy on AArch64

Message ID 1511768985-21639-1-git-send-email-herbert.guan@arm.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation success Compilation OK

Commit Message

Herbert Guan Nov. 27, 2017, 7:49 a.m. UTC
  This patch provides an option to do rte_memcpy() using the 'restrict'
qualifier, which can induce GCC to do optimizations by using more
efficient instructions, providing some performance gain over memcpy()
on some AArch64 platforms/environments.

The memory copy performance differs between different AArch64
platforms. And a more recent glibc (e.g. 2.23 or later)
can provide a better memcpy() performance compared to old glibc
versions. It's always suggested to use a more recent glibc if
possible, from which the entire system can benefit. If for some
reason an old glibc has to be used, this patch is provided as an
alternative.

This implementation can improve memory copy on some AArch64
platforms, when an old glibc (e.g. 2.19, 2.17...) is being used.
It is disabled by default and needs "RTE_ARCH_ARM64_MEMCPY"
defined to activate. It does not always provide better performance
than memcpy(), so users need to run the DPDK unit test
"memcpy_perf_autotest" and customize the parameters in the
"customization section" in rte_memcpy_64.h for best performance.

Compiler version will also impact the rte_memcpy() performance.
It has been observed on some platforms that, with the same code, a
GCC 7.2.0 compiled binary can provide better performance than a
GCC 4.8.5 compiled one. It's suggested to use GCC 5.4.0 or later.

Signed-off-by: Herbert Guan <herbert.guan@arm.com>
---
 .../common/include/arch/arm/rte_memcpy_64.h        | 193 +++++++++++++++++++++
 1 file changed, 193 insertions(+)
  

Comments

Jerin Jacob Nov. 29, 2017, 12:31 p.m. UTC | #1
-----Original Message-----
> Date: Mon, 27 Nov 2017 15:49:45 +0800
> From: Herbert Guan <herbert.guan@arm.com>
> To: jerin.jacob@caviumnetworks.com, jianbo.liu@arm.com, dev@dpdk.org
> CC: Herbert Guan <herbert.guan@arm.com>
> Subject: [PATCH] arch/arm: optimization for memcpy on AArch64
> X-Mailer: git-send-email 1.8.3.1
> +
> +/**************************************
> + * Beginning of customization section
> + **************************************/
> +#define ALIGNMENT_MASK 0x0F
> +#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
> +// Only src unalignment will be treaed as unaligned copy

C++ style comments. It may generate check patch errors.

> +#define IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) & ALIGNMENT_MASK)
> +#else
> +// Both dst and src unalignment will be treated as unaligned copy
> +#define IS_UNALIGNED_COPY(dst, src) \
> +		(((uintptr_t)(dst) | (uintptr_t)(src)) & ALIGNMENT_MASK)
> +#endif
> +
> +
> +// If copy size is larger than threshold, memcpy() will be used.
> +// Run "memcpy_perf_autotest" to determine the proper threshold.
> +#define ALIGNED_THRESHOLD       ((size_t)(0xffffffff))
> +#define UNALIGNED_THRESHOLD     ((size_t)(0xffffffff))

Do you see any case where this threshold is useful.

> +
> +static inline void *__attribute__ ((__always_inline__))
> +rte_memcpy(void *restrict dst, const void *restrict src, size_t n)
> +{
> +	if (n < 16) {
> +		rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
> +		return dst;
> +	}
> +	if (n < 64) {
> +		rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src, n);
> +		return dst;
> +	}

Unfortunately we have 128B cache arm64 implementation too. Could you 
please take care that based on RTE_CACHE_LINE_SIZE

> +	__builtin_prefetch(src, 0, 0);
> +	__builtin_prefetch(dst, 1, 0);

See above point and Please use DPDK equivalents. rte_prefetch*()
  
Pavan Nikhilesh Dec. 2, 2017, 7:33 a.m. UTC | #2
On Mon, Nov 27, 2017 at 03:49:45PM +0800, Herbert Guan wrote:
> This patch provides an option to do rte_memcpy() using 'restrict'
> qualifier, which can induce GCC to do optimizations by using more
> efficient instructions, providing some performance gain over memcpy()
> on some AArch64 platforms/enviroments.
>
> The memory copy performance differs between different AArch64
> platforms. And a more recent glibc (e.g. 2.23 or later)
> can provide a better memcpy() performance compared to old glibc
> versions. It's always suggested to use a more recent glibc if
> possible, from which the entire system can get benefit. If for some
> reason an old glibc has to be used, this patch is provided for an
> alternative.
>
> This implementation can improve memory copy on some AArch64
> platforms, when an old glibc (e.g. 2.19, 2.17...) is being used.
> It is disabled by default and needs "RTE_ARCH_ARM64_MEMCPY"
> defined to activate. It's not always proving better performance
> than memcpy() so users need to run DPDK unit test
> "memcpy_perf_autotest" and customize parameters in "customization
> section" in rte_memcpy_64.h for best performance.
>
> Compiler version will also impact the rte_memcpy() performance.
> It's observed on some platforms and with the same code, GCC 7.2.0
> compiled binary can provide better performance than GCC 4.8.5. It's
> suggested to use GCC 5.4.0 or later.
>
> Signed-off-by: Herbert Guan <herbert.guan@arm.com>
> ---
>  .../common/include/arch/arm/rte_memcpy_64.h        | 193 +++++++++++++++++++++
>  1 file changed, 193 insertions(+)
>
> diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
> index b80d8ba..1f42b3c 100644
> --- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
> +++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
> @@ -42,6 +42,197 @@
>
>  #include "generic/rte_memcpy.h"
>
> +#ifdef RTE_ARCH_ARM64_MEMCPY

There is an existing flag for arm32 to enable neon based memcpy
RTE_ARCH_ARM_NEON_MEMCPY we could reuse that here as restrict does the same.

> +#include <rte_common.h>
> +#include <rte_branch_prediction.h>
> +
> +/*******************************************************************************
> + * The memory copy performance differs on different AArch64 micro-architectures.
> + * And the most recent glibc (e.g. 2.23 or later) can provide a better memcpy()
> + * performance compared to old glibc versions. It's always suggested to use a
> + * more recent glibc if possible, from which the entire system can get benefit.
> + *
> + * This implementation improves memory copy on some aarch64 micro-architectures,
> + * when an old glibc (e.g. 2.19, 2.17...) is being used. It is disabled by
> + * default and needs "RTE_ARCH_ARM64_MEMCPY" defined to activate. It's not
> + * always providing better performance than memcpy() so users need to run unit
> + * test "memcpy_perf_autotest" and customize parameters in customization section
> + * below for best performance.
> + *
> + * Compiler version will also impact the rte_memcpy() performance. It's observed
> + * on some platforms and with the same code, GCC 7.2.0 compiled binaries can
> + * provide better performance than GCC 4.8.5 compiled binaries.
> + ******************************************************************************/
> +
> +/**************************************
> + * Beginning of customization section
> + **************************************/
> +#define ALIGNMENT_MASK 0x0F
> +#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
> +// Only src unalignment will be treaed as unaligned copy
> +#define IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) & ALIGNMENT_MASK)

We can use existing `rte_is_aligned` function instead.

> +#else
> +// Both dst and src unalignment will be treated as unaligned copy
> +#define IS_UNALIGNED_COPY(dst, src) \
> +		(((uintptr_t)(dst) | (uintptr_t)(src)) & ALIGNMENT_MASK)
> +#endif
> +
> +
> +// If copy size is larger than threshold, memcpy() will be used.
> +// Run "memcpy_perf_autotest" to determine the proper threshold.
> +#define ALIGNED_THRESHOLD       ((size_t)(0xffffffff))
> +#define UNALIGNED_THRESHOLD     ((size_t)(0xffffffff))
> +
> +
> +/**************************************
> + * End of customization section
> + **************************************/
> +#ifdef RTE_TOOLCHAIN_GCC
> +#if (GCC_VERSION < 50400)
> +#warning "The GCC version is quite old, which may result in sub-optimal \
> +performance of the compiled code. It is suggested that at least GCC 5.4.0 \
> +be used."
> +#endif
> +#endif
> +
> +static inline void __attribute__ ((__always_inline__))
use __rte_always_inline instead.
> +rte_mov16(uint8_t *restrict dst, const uint8_t *restrict src)
> +{
> +	__int128 * restrict dst128 = (__int128 * restrict)dst;
> +	const __int128 * restrict src128 = (const __int128 * restrict)src;
> +	*dst128 = *src128;
> +}
> +
> +static inline void __attribute__ ((__always_inline__))
> +rte_mov64(uint8_t *restrict dst, const uint8_t *restrict src)
> +{
> +	__int128 * restrict dst128 = (__int128 * restrict)dst;

ISO C does not support ‘__int128’ please use '__int128_t' or '__uint128_t'.

> +	const __int128 * restrict src128 = (const __int128 * restrict)src;
> +	dst128[0] = src128[0];
> +	dst128[1] = src128[1];
> +	dst128[2] = src128[2];
> +	dst128[3] = src128[3];
> +}
> +
<snip>

Would doing this still benefit if size is a compile time constant? i.e. when
__builtin_constant_p(n) is true.

> +
> +static inline void *__attribute__ ((__always_inline__))
> +rte_memcpy(void *restrict dst, const void *restrict src, size_t n)
> +{
> +	if (n < 16) {
> +		rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
> +		return dst;
> +	}
> +	if (n < 64) {
> +		rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src, n);
> +		return dst;
> +	}
> +	__builtin_prefetch(src, 0, 0);
> +	__builtin_prefetch(dst, 1, 0);
> +	if (likely(
> +		  (!IS_UNALIGNED_COPY(dst, src) && n <= ALIGNED_THRESHOLD)
> +		   || (IS_UNALIGNED_COPY(dst, src) && n <= UNALIGNED_THRESHOLD)
> +		  )) {
> +		rte_memcpy_ge64((uint8_t *)dst, (const uint8_t *)src, n);
> +		return dst;
> +	} else
> +		return memcpy(dst, src, n);
> +}
> +
> +
> +#else
>  static inline void
>  rte_mov16(uint8_t *dst, const uint8_t *src)
>  {
> @@ -80,6 +271,8 @@
>
>  #define rte_memcpy(d, s, n)	memcpy((d), (s), (n))
>
> +#endif
> +
>  #ifdef __cplusplus
>  }
>  #endif
> --
> 1.8.3.1
>
Regards,
Pavan.
  
Herbert Guan Dec. 3, 2017, 12:37 p.m. UTC | #3
Jerin,

Thanks a lot for your review and comments.  Please find my comments below inline.

Best regards,
Herbert

> -----Original Message-----
> From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> Sent: Wednesday, November 29, 2017 20:32
> To: Herbert Guan <Herbert.Guan@arm.com>
> Cc: Jianbo Liu <Jianbo.Liu@arm.com>; dev@dpdk.org
> Subject: Re: [PATCH] arch/arm: optimization for memcpy on AArch64
>
> -----Original Message-----
> > Date: Mon, 27 Nov 2017 15:49:45 +0800
> > From: Herbert Guan <herbert.guan@arm.com>
> > To: jerin.jacob@caviumnetworks.com, jianbo.liu@arm.com, dev@dpdk.org
> > CC: Herbert Guan <herbert.guan@arm.com>
> > Subject: [PATCH] arch/arm: optimization for memcpy on AArch64
> > X-Mailer: git-send-email 1.8.3.1
> > +
> > +/**************************************
> > + * Beginning of customization section
> > +**************************************/
> > +#define ALIGNMENT_MASK 0x0F
> > +#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
> > +// Only src unalignment will be treaed as unaligned copy
>
> C++ style comments. It may generate check patch errors.

I'll change it to use C style comment in the version 2.

>
> > +#define IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) &
> > +ALIGNMENT_MASK) #else // Both dst and src unalignment will be treated
> > +as unaligned copy #define IS_UNALIGNED_COPY(dst, src) \
> > +(((uintptr_t)(dst) | (uintptr_t)(src)) & ALIGNMENT_MASK)
> #endif
> > +
> > +
> > +// If copy size is larger than threshold, memcpy() will be used.
> > +// Run "memcpy_perf_autotest" to determine the proper threshold.
> > +#define ALIGNED_THRESHOLD       ((size_t)(0xffffffff))
> > +#define UNALIGNED_THRESHOLD     ((size_t)(0xffffffff))
>
> Do you see any case where this threshold is useful.

Yes, on some platforms and/or with some glibc versions, the glibc memcpy() has better performance for larger sizes (e.g., >512, >4096...).  So developers should run the unit test to find the best thresholds.  The default value of 0xffffffff should be replaced with the evaluated values.
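
For illustration only (these values are hypothetical, not measured), a tuned customization section could end up looking like the sketch below after running "memcpy_perf_autotest" on a given target:

/*
 * Editor's sketch, example values only.  Suppose memcpy_perf_autotest
 * showed glibc memcpy() winning above 1024 bytes for aligned copies and
 * above 512 bytes for unaligned copies on the target platform:
 */
#define ALIGNED_THRESHOLD       ((size_t)1024)
#define UNALIGNED_THRESHOLD     ((size_t)512)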

>
> > +
> > +static inline void *__attribute__ ((__always_inline__))
> > +rte_memcpy(void *restrict dst, const void *restrict src, size_t n)
> > +{
> > +if (n < 16) {
> > +rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
> > +return dst;
> > +}
> > +if (n < 64) {
> > +rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src,
> n);
> > +return dst;
> > +}
>
> Unfortunately we have 128B cache arm64 implementation too. Could you
> please take care that based on RTE_CACHE_LINE_SIZE
>

Here the value of '64' is not the cache line size.  Rather, since the prefetch itself costs some cycles, it's not worthwhile to prefetch for small-size (e.g. < 64 bytes) copies.  Per my tests, prefetching for small-size copies will actually lower the performance.

On the other hand, I can only find one 128B-cache-line aarch64 machine here.  And there do exist some optimizations specific to that machine.  I'm not sure whether they'll be beneficial for other 128B-cache machines or not.  I prefer not to put them in this patch but in a later standalone patch specific to 128B-cache machines.

> > +__builtin_prefetch(src, 0, 0);  // rte_prefetch_non_temporal(src);
> > +__builtin_prefetch(dst, 1, 0);  //  * unchanged *
>
> See above point and Please use DPDK equivalents. rte_prefetch*()

I can use "rte_prefetch_non_temporal()" for the read prefetch.  However, there's no DPDK equivalent for the write prefetch.  Would you suggest that we add one API to DPDK?
BTW, the current DPDK rte_prefetch*() functions use ASM instructions.  It might be better to use __builtin_prefetch(src, 0, 0/1/2/3) for better compatibility with future aarch64 architectures.
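
To make the hints explicit: __builtin_prefetch(addr, rw, locality) takes rw = 0 for read / 1 for write and locality from 0 (non-temporal) to 3 (high temporal locality).  A write-prefetch helper, if one were added, could be a thin wrapper like the sketch below (the name is hypothetical, not an existing DPDK API):

/* Hypothetical helper, not an existing DPDK API: prefetch a cache line
 * for writing with no temporal locality, mirroring what
 * __builtin_prefetch(dst, 1, 0) does in the patch. */
static inline void
rte_prefetch_write_non_temporal(const volatile void *p)
{
	__builtin_prefetch((const void *)p, 1, 0);
}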

IMPORTANT NOTICE: The contents of this email and any attachments are confidential and may also be privileged. If you are not the intended recipient, please notify the sender immediately and do not disclose the contents to any other person, use it for any purpose, or store or copy the information in any medium. Thank you.
  
Herbert Guan Dec. 3, 2017, 12:38 p.m. UTC | #4
Pavan,

Thanks for review and comments.  Please find my comments inline below.

Best regards,
Herbert

> -----Original Message-----
> From: Pavan Nikhilesh Bhagavatula
> [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Saturday, December 2, 2017 15:33
> To: Herbert Guan <Herbert.Guan@arm.com>; Jianbo Liu
> <Jianbo.Liu@arm.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] arch/arm: optimization for memcpy on
> AArch64
>
> On Mon, Nov 27, 2017 at 03:49:45PM +0800, Herbert Guan wrote:
> > This patch provides an option to do rte_memcpy() using 'restrict'
> > qualifier, which can induce GCC to do optimizations by using more
> > efficient instructions, providing some performance gain over memcpy()
> > on some AArch64 platforms/enviroments.
> >
> > The memory copy performance differs between different AArch64
> > platforms. And a more recent glibc (e.g. 2.23 or later) can provide a
> > better memcpy() performance compared to old glibc versions. It's
> > always suggested to use a more recent glibc if possible, from which
> > the entire system can get benefit. If for some reason an old glibc has
> > to be used, this patch is provided for an alternative.
> >
> > This implementation can improve memory copy on some AArch64
> platforms,
> > when an old glibc (e.g. 2.19, 2.17...) is being used.
> > It is disabled by default and needs "RTE_ARCH_ARM64_MEMCPY"
> > defined to activate. It's not always proving better performance than
> > memcpy() so users need to run DPDK unit test "memcpy_perf_autotest"
> > and customize parameters in "customization section" in rte_memcpy_64.h
> > for best performance.
> >
> > Compiler version will also impact the rte_memcpy() performance.
> > It's observed on some platforms and with the same code, GCC 7.2.0
> > compiled binary can provide better performance than GCC 4.8.5. It's
> > suggested to use GCC 5.4.0 or later.
> >
> > Signed-off-by: Herbert Guan <herbert.guan@arm.com>
> > ---
> >  .../common/include/arch/arm/rte_memcpy_64.h        | 193
> +++++++++++++++++++++
> >  1 file changed, 193 insertions(+)
> >
> > diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
> > b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
> > index b80d8ba..1f42b3c 100644
> > --- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
> > +++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
> > @@ -42,6 +42,197 @@
> >
> >  #include "generic/rte_memcpy.h"
> >
> > +#ifdef RTE_ARCH_ARM64_MEMCPY
>
> There is an existing flag for arm32 to enable neon based memcpy
> RTE_ARCH_ARM_NEON_MEMCPY we could reuse that here as restrict does
> the same.
>

This implementation is actually not using ARM NEON instructions, so the existing flag does not describe the option exactly.  It would be good if the existing flag were "RTE_ARCH_ARM_MEMCPY", but unfortunately it might be too late now to get the flags aligned.

> > +#include <rte_common.h>
> > +#include <rte_branch_prediction.h>
> > +
> >
> +/*********************************************************
> ***********
> > +***********
> > + * The memory copy performance differs on different AArch64 micro-
> architectures.
> > + * And the most recent glibc (e.g. 2.23 or later) can provide a
> > +better memcpy()
> > + * performance compared to old glibc versions. It's always suggested
> > +to use a
> > + * more recent glibc if possible, from which the entire system can get
> benefit.
> > + *
> > + * This implementation improves memory copy on some aarch64
> > +micro-architectures,
> > + * when an old glibc (e.g. 2.19, 2.17...) is being used. It is
> > +disabled by
> > + * default and needs "RTE_ARCH_ARM64_MEMCPY" defined to activate.
> > +It's not
> > + * always providing better performance than memcpy() so users need to
> > +run unit
> > + * test "memcpy_perf_autotest" and customize parameters in
> > +customization section
> > + * below for best performance.
> > + *
> > + * Compiler version will also impact the rte_memcpy() performance.
> > +It's observed
> > + * on some platforms and with the same code, GCC 7.2.0 compiled
> > +binaries can
> > + * provide better performance than GCC 4.8.5 compiled binaries.
> > +
> >
> +*********************************************************
> ************
> > +*********/
> > +
> > +/**************************************
> > + * Beginning of customization section
> > +**************************************/
> > +#define ALIGNMENT_MASK 0x0F
> > +#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
> > +// Only src unalignment will be treaed as unaligned copy #define
> > +IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) & ALIGNMENT_MASK)
>
> We can use existing `rte_is_aligned` function instead.

The existing 'rte_is_aligned()' inline function is defined in a relatively complex way, and more instructions will be generated for it (using GCC 7.2.0):

0000000000000000 <align_check_rte>:   // using rte_is_aligned()
   0:91003c01 addx1, x0, #0xf
   4:927cec21 andx1, x1, #0xfffffffffffffff0
   8:eb01001f cmpx0, x1
   c:1a9f07e0 csetw0, ne  // ne = any
  10:d65f03c0 ret
  14:d503201f nop

0000000000000018 <align_check_simp>:   // using above expression
  18:12000c00 andw0, w0, #0xf
  1c:d65f03c0 ret

So to get better performance, it's better to use the simple logic.
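
For reference, a minimal reconstruction (editor's sketch, not code from the thread) of the two checks behind the listings above; both are intended to return non-zero when the pointer is not 16-byte aligned:

#include <stdint.h>
#include <rte_common.h>

/* Intended to correspond to the add/and/cmp/cset sequence shown above. */
static int align_check_rte(void *p)
{
	return !rte_is_aligned(p, 16);
}

/* Intended to correspond to the single 'and' instruction shown above. */
static int align_check_simp(void *p)
{
	return (uintptr_t)p & 0xF;
}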


>
> > +#else
> > +// Both dst and src unalignment will be treated as unaligned copy
> > +#define IS_UNALIGNED_COPY(dst, src) \
> > +(((uintptr_t)(dst) | (uintptr_t)(src)) & ALIGNMENT_MASK)
> #endif
> > +
> > +
> > +// If copy size is larger than threshold, memcpy() will be used.
> > +// Run "memcpy_perf_autotest" to determine the proper threshold.
> > +#define ALIGNED_THRESHOLD       ((size_t)(0xffffffff))
> > +#define UNALIGNED_THRESHOLD     ((size_t)(0xffffffff))
> > +
> > +
> > +/**************************************
> > + * End of customization section
> > + **************************************/
> > +#ifdef RTE_TOOLCHAIN_GCC
> > +#if (GCC_VERSION < 50400)
> > +#warning "The GCC version is quite old, which may result in
> > +sub-optimal \ performance of the compiled code. It is suggested that
> > +at least GCC 5.4.0 \ be used."
> > +#endif
> > +#endif
> > +
> > +static inline void __attribute__ ((__always_inline__))
> use __rte_always_inline instead.
> > +rte_mov16(uint8_t *restrict dst, const uint8_t *restrict src) {
> > +__int128 * restrict dst128 = (__int128 * restrict)dst;
> > +const __int128 * restrict src128 = (const __int128 * restrict)src;
> > +*dst128 = *src128;
> > +}
> > +
> > +static inline void __attribute__ ((__always_inline__))
> > +rte_mov64(uint8_t *restrict dst, const uint8_t *restrict src) {
> > +__int128 * restrict dst128 = (__int128 * restrict)dst;
>
> ISO C does not support ‘__int128’ please use '__int128_t' or '__uint128_t'.

Very good point.  Thanks for this reminding and I'll update to use '__uint128_t' in the next version.

>
> > +const __int128 * restrict src128 = (const __int128 * restrict)src;
> > +dst128[0] = src128[0];
> > +dst128[1] = src128[1];
> > +dst128[2] = src128[2];
> > +dst128[3] = src128[3];
> > +}
> > +
> <snip>
>
> Would doing this still benifit if size is compile time constant? i.e. when
> __builtin_constant_p(n) is true.
>

Yes, a performance margin is observed when the size is a compile-time constant on some tested platforms.

> > +
> > +static inline void *__attribute__ ((__always_inline__))
> > +rte_memcpy(void *restrict dst, const void *restrict src, size_t n) {
> > +if (n < 16) {
> > +rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
> > +return dst;
> > +}
> > +if (n < 64) {
> > +rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src,
> n);
> > +return dst;
> > +}
> > +__builtin_prefetch(src, 0, 0);
> > +__builtin_prefetch(dst, 1, 0);
> > +if (likely(
> > +  (!IS_UNALIGNED_COPY(dst, src) && n <=
> ALIGNED_THRESHOLD)
> > +   || (IS_UNALIGNED_COPY(dst, src) && n <=
> UNALIGNED_THRESHOLD)
> > +  )) {
> > +rte_memcpy_ge64((uint8_t *)dst, (const uint8_t *)src, n);
> > +return dst;
> > +} else
> > +return memcpy(dst, src, n);
> > +}
> > +
> > +
> > +#else
> >  static inline void
> >  rte_mov16(uint8_t *dst, const uint8_t *src)  { @@ -80,6 +271,8 @@
> >
> >  #define rte_memcpy(d, s, n)memcpy((d), (s), (n))
> >
> > +#endif
> > +
> >  #ifdef __cplusplus
> >  }
> >  #endif
> > --
> > 1.8.3.1
> >
> Regards,
> Pavan.

IMPORTANT NOTICE: The contents of this email and any attachments are confidential and may also be privileged. If you are not the intended recipient, please notify the sender immediately and do not disclose the contents to any other person, use it for any purpose, or store or copy the information in any medium. Thank you.
  
Pavan Nikhilesh Dec. 3, 2017, 2:20 p.m. UTC | #5
On Sun, Dec 03, 2017 at 12:38:35PM +0000, Herbert Guan wrote:
> Pavan,
>
> Thanks for review and comments.  Please find my comments inline below.
>
> Best regards,
> Herbert
>
<snip>
> > There is an existing flag for arm32 to enable neon based memcpy
> > RTE_ARCH_ARM_NEON_MEMCPY we could reuse that here as restrict does
> > the same.
> >
> This implementation is actually not using ARM NEON instructions so the existing flag is not describing the option exactly.  It'll be good if the existing flag is "RTE_ARCH_ARM_MEMCPY" but unfortunately it might be too late now to get the flags aligned.
>

Correct me if I'm wrong but doesn't restrict tell the compiler to do SIMD
optimization?
Anyway can we put RTE_ARCH_ARM64_MEMCPY into config/common_base as
CONFIG_RTE_ARCH_ARM64_MEMCPY=n so that it would be easier to enable/disable.

> > > +#include <rte_common.h>
> > > +#include <rte_branch_prediction.h>
> > > +
> > >
> > +/*********************************************************
> > ***********
> > > +***********
> > > + * The memory copy performance differs on different AArch64 micro-
> > architectures.
> > > + * And the most recent glibc (e.g. 2.23 or later) can provide a
> > > +better memcpy()
> > > + * performance compared to old glibc versions. It's always suggested
> > > +to use a
> > > + * more recent glibc if possible, from which the entire system can get
> > benefit.
> > > + *
> > > + * This implementation improves memory copy on some aarch64
> > > +micro-architectures,
> > > + * when an old glibc (e.g. 2.19, 2.17...) is being used. It is
> > > +disabled by
> > > + * default and needs "RTE_ARCH_ARM64_MEMCPY" defined to activate.
> > > +It's not
> > > + * always providing better performance than memcpy() so users need to
> > > +run unit
> > > + * test "memcpy_perf_autotest" and customize parameters in
> > > +customization section
> > > + * below for best performance.
> > > + *
> > > + * Compiler version will also impact the rte_memcpy() performance.
> > > +It's observed
> > > + * on some platforms and with the same code, GCC 7.2.0 compiled
> > > +binaries can
> > > + * provide better performance than GCC 4.8.5 compiled binaries.
> > > +
> > >
> > +*********************************************************
> > ************
> > > +*********/
> > > +
> > > +/**************************************
> > > + * Beginning of customization section
> > > +**************************************/
> > > +#define ALIGNMENT_MASK 0x0F
> > > +#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
> > > +// Only src unalignment will be treaed as unaligned copy #define
> > > +IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) & ALIGNMENT_MASK)
> >
> > We can use existing `rte_is_aligned` function instead.
>
> The exising 'rte_is_aligned()' inline function is defined in a relatively complex way, and there will be more instructions generated (using GCC 7.2.0):
>
> 0000000000000000 <align_check_rte>:   // using rte_is_aligned()
>    0:91003c01 addx1, x0, #0xf
>    4:927cec21 andx1, x1, #0xfffffffffffffff0
>    8:eb01001f cmpx0, x1
>    c:1a9f07e0 csetw0, ne  // ne = any
>   10:d65f03c0 ret
>   14:d503201f nop
>
> 0000000000000018 <align_check_simp>:   // using above expression
>   18:12000c00 andw0, w0, #0xf
>   1c:d65f03c0 ret
>
> So to get better performance, it's better to use the simple logic.

Agreed, I have noticed that too; maybe we could change rte_is_aligned to be
simpler (not in this patch).

<snip>
> > Would doing this still benifit if size is compile time constant? i.e. when
> > __builtin_constant_p(n) is true.
> >
> Yes, performance margin is observed if size is compile time constant on some tested platforms.
>

Sorry, I didn't get you, but which is better? If the size is a compile-time constant,
is using libc memcpy better, or is going with the restrict implementation better?

If the former then we could do what 32bit rte_memcpy is using i.e.

#define rte_memcpy(dst, src, n)              \
        __extension__ ({                     \
        (__builtin_constant_p(n)) ?          \
        memcpy((dst), (src), (n)) :          \
        rte_memcpy_func((dst), (src), (n)); })

Regards,
Pavan.
  
Herbert Guan Dec. 4, 2017, 7:14 a.m. UTC | #6
> -----Original Message-----
> From: Pavan Nikhilesh Bhagavatula
> [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Sunday, December 3, 2017 22:21
> To: Herbert Guan <Herbert.Guan@arm.com>; Jianbo Liu
> <Jianbo.Liu@arm.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] arch/arm: optimization for memcpy on
> AArch64
>
> On Sun, Dec 03, 2017 at 12:38:35PM +0000, Herbert Guan wrote:
> > Pavan,
> >
> > Thanks for review and comments.  Please find my comments inline below.
> >
> > Best regards,
> > Herbert
> >
> <snip>
> > > There is an existing flag for arm32 to enable neon based memcpy
> > > RTE_ARCH_ARM_NEON_MEMCPY we could reuse that here as restrict
> does
> > > the same.
> > >
> > This implementation is actually not using ARM NEON instructions so the
> existing flag is not describing the option exactly.  It'll be good if the existing
> flag is "RTE_ARCH_ARM_MEMCPY" but unfortunately it might be too late
> now to get the flags aligned.
> >
>
> Correct me if I'm wrong but doesn't restrict tell the compiler to do SIMD
> optimization?
> Anyway can we put RTE_ARCH_ARM64_MEMCPY into config/common_base
> as CONFIG_RTE_ARCH_ARM64_MEMCPY=n so that it would be easier to
> enable/disable.
>

The result of using 'restrict' is that the compiler generates code with ldp/stp instructions.  These instructions actually belong to the "data transfer instructions" group, though they load/store a pair of registers.  'ld1/st1' are the SIMD (NEON) instructions.
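
A minimal sketch of the effect being described (editor's illustration, not code from the patch): with both pointers qualified 'restrict' the compiler may assume dst and src do not alias, so it can schedule both loads ahead of both stores and emit them as load-pair/store-pair instructions.

/* Editor's sketch: a 32-byte copy in the style of the patch's rte_mov32().
 * With 'restrict', GCC is free to emit ldp/stp pairs; without it, it must
 * assume the buffers may overlap and keep loads and stores interleaved. */
static inline void
copy32_restrict(uint8_t *restrict dst, const uint8_t *restrict src)
{
	__uint128_t *restrict d = (__uint128_t *restrict)dst;
	const __uint128_t *restrict s = (const __uint128_t *restrict)src;

	d[0] = s[0];
	d[1] = s[1];
}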

I can add CONFIG_RTE_ARCH_ARM64_MEMCPY=n into common_armv8a_linuxapp in the new version as you've suggested.

> > > > +#include <rte_common.h>
> > > > +#include <rte_branch_prediction.h>
> > > > +
> > > >
> > >
> +/*********************************************************
> > > ***********
> > > > +***********
> > > > + * The memory copy performance differs on different AArch64
> > > > +micro-
> > > architectures.
> > > > + * And the most recent glibc (e.g. 2.23 or later) can provide a
> > > > +better memcpy()
> > > > + * performance compared to old glibc versions. It's always
> > > > +suggested to use a
> > > > + * more recent glibc if possible, from which the entire system
> > > > +can get
> > > benefit.
> > > > + *
> > > > + * This implementation improves memory copy on some aarch64
> > > > +micro-architectures,
> > > > + * when an old glibc (e.g. 2.19, 2.17...) is being used. It is
> > > > +disabled by
> > > > + * default and needs "RTE_ARCH_ARM64_MEMCPY" defined to
> activate.
> > > > +It's not
> > > > + * always providing better performance than memcpy() so users
> > > > +need to run unit
> > > > + * test "memcpy_perf_autotest" and customize parameters in
> > > > +customization section
> > > > + * below for best performance.
> > > > + *
> > > > + * Compiler version will also impact the rte_memcpy() performance.
> > > > +It's observed
> > > > + * on some platforms and with the same code, GCC 7.2.0 compiled
> > > > +binaries can
> > > > + * provide better performance than GCC 4.8.5 compiled binaries.
> > > > +
> > > >
> > >
> +*********************************************************
> > > ************
> > > > +*********/
> > > > +
> > > > +/**************************************
> > > > + * Beginning of customization section
> > > > +**************************************/
> > > > +#define ALIGNMENT_MASK 0x0F
> > > > +#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
> > > > +// Only src unalignment will be treaed as unaligned copy #define
> > > > +IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) & ALIGNMENT_MASK)
> > >
> > > We can use existing `rte_is_aligned` function instead.
> >
> > The exising 'rte_is_aligned()' inline function is defined in a relatively
> complex way, and there will be more instructions generated (using GCC
> 7.2.0):
> >
> > 0000000000000000 <align_check_rte>:   // using rte_is_aligned()
> >    0:91003c01 addx1, x0, #0xf
> >    4:927cec21 andx1, x1, #0xfffffffffffffff0
> >    8:eb01001f cmpx0, x1
> >    c:1a9f07e0 csetw0, ne  // ne = any
> >   10:d65f03c0 ret
> >   14:d503201f nop
> >
> > 0000000000000018 <align_check_simp>:   // using above expression
> >   18:12000c00 andw0, w0, #0xf
> >   1c:d65f03c0 ret
> >
> > So to get better performance, it's better to use the simple logic.
>
> Agreed, I have noticed that too maybe we could change rte_is_aligned to be
> simpler (Not in this patch).
>
> <snip>
> > > Would doing this still benifit if size is compile time constant?
> > > i.e. when
> > > __builtin_constant_p(n) is true.
> > >
> > Yes, performance margin is observed if size is compile time constant on
> some tested platforms.
> >
>
> Sorry I didn't get you but which is better? If size is compile time constant is
> using libc memcpy is better or going with restrict implementation better.
>
> If the former then we could do what 32bit rte_memcpy is using i.e.
>
> #define rte_memcpy(dst, src, n)              \
>         __extension__ ({                     \
>         (__builtin_constant_p(n)) ?          \
>         memcpy((dst), (src), (n)) :          \
>         rte_memcpy_func((dst), (src), (n)); })
>
Per my test, the results usually go in the same direction: if the variable-size case gets improved performance, then hopefully the compile-time-constant case will be improved as well, and vice versa.  The percentage might be different.  So in this patch, the property of the size parameter (variable or compile-time constant) is not checked.

> Regards,
> Pavan.

Thanks,
Herbert
IMPORTANT NOTICE: The contents of this email and any attachments are confidential and may also be privileged. If you are not the intended recipient, please notify the sender immediately and do not disclose the contents to any other person, use it for any purpose, or store or copy the information in any medium. Thank you.
  
Jerin Jacob Dec. 15, 2017, 4:06 a.m. UTC | #7
-----Original Message-----
> Date: Sun, 3 Dec 2017 12:37:30 +0000
> From: Herbert Guan <Herbert.Guan@arm.com>
> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> CC: Jianbo Liu <Jianbo.Liu@arm.com>, "dev@dpdk.org" <dev@dpdk.org>
> Subject: RE: [PATCH] arch/arm: optimization for memcpy on AArch64
> 
> Jerin,

Hi Herbert,

> 
> Thanks a lot for your review and comments.  Please find my comments below inline.
> 
> Best regards,
> Herbert
> 
> > -----Original Message-----
> > From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> > Sent: Wednesday, November 29, 2017 20:32
> > To: Herbert Guan <Herbert.Guan@arm.com>
> > Cc: Jianbo Liu <Jianbo.Liu@arm.com>; dev@dpdk.org
> > Subject: Re: [PATCH] arch/arm: optimization for memcpy on AArch64
> >
> > -----Original Message-----
> > > Date: Mon, 27 Nov 2017 15:49:45 +0800
> > > From: Herbert Guan <herbert.guan@arm.com>
> > > To: jerin.jacob@caviumnetworks.com, jianbo.liu@arm.com, dev@dpdk.org
> > > CC: Herbert Guan <herbert.guan@arm.com>
> > > Subject: [PATCH] arch/arm: optimization for memcpy on AArch64
> > > X-Mailer: git-send-email 1.8.3.1
> > > +
> > > +/**************************************
> > > + * Beginning of customization section
> > > +**************************************/
> > > +#define ALIGNMENT_MASK 0x0F
> > > +#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
> > > +// Only src unalignment will be treaed as unaligned copy
> >
> > C++ style comments. It may generate check patch errors.
> 
> I'll change it to use C style comment in the version 2.
> 
> >
> > > +#define IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) &
> > > +ALIGNMENT_MASK) #else // Both dst and src unalignment will be treated
> > > +as unaligned copy #define IS_UNALIGNED_COPY(dst, src) \
> > > +(((uintptr_t)(dst) | (uintptr_t)(src)) & ALIGNMENT_MASK)
> > #endif
> > > +
> > > +
> > > +// If copy size is larger than threshold, memcpy() will be used.
> > > +// Run "memcpy_perf_autotest" to determine the proper threshold.
> > > +#define ALIGNED_THRESHOLD       ((size_t)(0xffffffff))
> > > +#define UNALIGNED_THRESHOLD     ((size_t)(0xffffffff))
> >
> > Do you see any case where this threshold is useful.
> 
> Yes, on some platforms, and/or with some glibc version,  the glibc memcpy has better performance in larger size (e.g., >512, >4096...).  So developers should run unit test to find the best threshold.  The default value of 0xffffffff should be modified with evaluated values.

OK

> 
> >
> > > +
> > > +static inline void *__attribute__ ((__always_inline__))i

use __rte_always_inline

> > > +rte_memcpy(void *restrict dst, const void *restrict src, size_t n)
> > > +{
> > > +if (n < 16) {
> > > +rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
> > > +return dst;
> > > +}
> > > +if (n < 64) {
> > > +rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src,
> > n);
> > > +return dst;
> > > +}
> >
> > Unfortunately we have 128B cache arm64 implementation too. Could you
> > please take care that based on RTE_CACHE_LINE_SIZE
> >
> 
> Here the value of '64' is not the cache line size.  But for the reason that prefetch itself will cost some cycles, it's not worthwhile to do prefetch for small size (e.g. < 64 bytes) copy.  Per my test, prefetching for small size copy will actually lower the performance.

But
I think '64' is a function of the cache line size, i.e. any reason why we haven't used the rte_memcpy_ge16_lt128()/rte_memcpy_ge128() pair instead of the rte_memcpy_ge16_lt64()/rte_memcpy_ge64() pair?
I think, if you can add one more conditional compilation to choose between rte_memcpy_ge16_lt128()/rte_memcpy_ge128() and rte_memcpy_ge16_lt64()/rte_memcpy_ge64(), it
will address all the arm64 variants supported in current DPDK.
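
A sketch of the suggested dispatch (editor's illustration; rte_memcpy_ge16_lt128() and rte_memcpy_ge128() are assumed 128B-line counterparts of the helpers in this patch, and the prefetch/threshold logic is omitted for brevity):

/* Sketch only: choose the small-copy cutoff and large-copy helper from
 * the build-time RTE_CACHE_LINE_SIZE, as suggested above. */
static inline void *
rte_memcpy_sketch(void *restrict dst, const void *restrict src, size_t n)
{
	if (n < 16) {
		rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
		return dst;
	}
#if RTE_CACHE_LINE_SIZE == 128
	if (n < 128)
		rte_memcpy_ge16_lt128((uint8_t *)dst, (const uint8_t *)src, n);
	else
		rte_memcpy_ge128((uint8_t *)dst, (const uint8_t *)src, n);
#else
	if (n < 64)
		rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src, n);
	else
		rte_memcpy_ge64((uint8_t *)dst, (const uint8_t *)src, n);
#endif
	return dst;
}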

> 
> In the other hand, I can only find one 128B cache line aarch64 machine here.  And it do exist some specific optimization for this machine.  Not sure if it'll be beneficial for other 128B cache machines or not.  I prefer not to put it in this patch but in a later standalone specific patch for 128B cache machines.
> 
> > > +__builtin_prefetch(src, 0, 0);  // rte_prefetch_non_temporal(src);
> > > +__builtin_prefetch(dst, 1, 0);  //  * unchanged *

# Why is __builtin_prefetch() used only once? Why not invoke it in the rte_memcpy_ge64 loop?
# Does it make sense to prefetch src + 64/128 * n?
  
Herbert Guan Dec. 18, 2017, 2:51 a.m. UTC | #8
Hi Jerin,

> -----Original Message-----
> From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> Sent: Friday, December 15, 2017 12:06
> To: Herbert Guan <Herbert.Guan@arm.com>
> Cc: Jianbo Liu <Jianbo.Liu@arm.com>; dev@dpdk.org
> Subject: Re: [PATCH] arch/arm: optimization for memcpy on AArch64
>
> -----Original Message-----
> > Date: Sun, 3 Dec 2017 12:37:30 +0000
> > From: Herbert Guan <Herbert.Guan@arm.com>
> > To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> > CC: Jianbo Liu <Jianbo.Liu@arm.com>, "dev@dpdk.org" <dev@dpdk.org>
> > Subject: RE: [PATCH] arch/arm: optimization for memcpy on AArch64
> >
> > Jerin,
>
> Hi Herbert,
>
> >
> > Thanks a lot for your review and comments.  Please find my comments
> below inline.
> >
> > Best regards,
> > Herbert
> >
> > > -----Original Message-----
> > > From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> > > Sent: Wednesday, November 29, 2017 20:32
> > > To: Herbert Guan <Herbert.Guan@arm.com>
> > > Cc: Jianbo Liu <Jianbo.Liu@arm.com>; dev@dpdk.org
> > > Subject: Re: [PATCH] arch/arm: optimization for memcpy on AArch64
> > >
> > > -----Original Message-----
> > > > Date: Mon, 27 Nov 2017 15:49:45 +0800
> > > > From: Herbert Guan <herbert.guan@arm.com>
> > > > To: jerin.jacob@caviumnetworks.com, jianbo.liu@arm.com,
> dev@dpdk.org
> > > > CC: Herbert Guan <herbert.guan@arm.com>
> > > > Subject: [PATCH] arch/arm: optimization for memcpy on AArch64
> > > > X-Mailer: git-send-email 1.8.3.1
> > > > +
> > > > +/**************************************
> > > > + * Beginning of customization section
> > > > +**************************************/
> > > > +#define ALIGNMENT_MASK 0x0F
> > > > +#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
> > > > +// Only src unalignment will be treaed as unaligned copy
> > >
> > > C++ style comments. It may generate check patch errors.
> >
> > I'll change it to use C style comment in the version 2.
> >
> > >
> > > > +#define IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) &
> > > > +ALIGNMENT_MASK) #else // Both dst and src unalignment will be
> treated
> > > > +as unaligned copy #define IS_UNALIGNED_COPY(dst, src) \
> > > > +(((uintptr_t)(dst) | (uintptr_t)(src)) & ALIGNMENT_MASK)
> > > #endif
> > > > +
> > > > +
> > > > +// If copy size is larger than threshold, memcpy() will be used.
> > > > +// Run "memcpy_perf_autotest" to determine the proper threshold.
> > > > +#define ALIGNED_THRESHOLD       ((size_t)(0xffffffff))
> > > > +#define UNALIGNED_THRESHOLD     ((size_t)(0xffffffff))
> > >
> > > Do you see any case where this threshold is useful.
> >
> > Yes, on some platforms, and/or with some glibc version,  the glibc memcpy
> has better performance in larger size (e.g., >512, >4096...).  So developers
> should run unit test to find the best threshold.  The default value of 0xffffffff
> should be modified with evaluated values.
>
> OK
>
> >
> > >
> > > > +
> > > > +static inline void *__attribute__ ((__always_inline__))i
>
> use __rte_always_inline

Applied in V3 patch.

>
> > > > +rte_memcpy(void *restrict dst, const void *restrict src, size_t n)
> > > > +{
> > > > +if (n < 16) {
> > > > +rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
> > > > +return dst;
> > > > +}
> > > > +if (n < 64) {
> > > > +rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src,
> > > n);
> > > > +return dst;
> > > > +}
> > >
> > > Unfortunately we have 128B cache arm64 implementation too. Could you
> > > please take care that based on RTE_CACHE_LINE_SIZE
> > >
> >
> > Here the value of '64' is not the cache line size.  But for the reason that
> prefetch itself will cost some cycles, it's not worthwhile to do prefetch for
> small size (e.g. < 64 bytes) copy.  Per my test, prefetching for small size copy
> will actually lower the performance.
>
> But
> I think, '64' is a function of cache size. ie. Any reason why we haven't used
> rte_memcpy_ge16_lt128()/rte_memcpy_ge128 pair instead of
> rte_memcpy_ge16_lt64//rte_memcpy_ge64 pair?
> I think, if you can add one more conditional compilation to choose between
> rte_memcpy_ge16_lt128()/rte_memcpy_ge128 vs
> rte_memcpy_ge16_lt64//rte_memcpy_ge64,
> will address the all arm64 variants supported in current DPDK.
>

The logic for 128B cache lines is implemented as you've suggested, and has been added in the V3 patch.

> >
> > In the other hand, I can only find one 128B cache line aarch64 machine here.
> And it do exist some specific optimization for this machine.  Not sure if it'll be
> beneficial for other 128B cache machines or not.  I prefer not to put it in this
> patch but in a later standalone specific patch for 128B cache machines.
> >
> > > > +__builtin_prefetch(src, 0, 0);  // rte_prefetch_non_temporal(src);
> > > > +__builtin_prefetch(dst, 1, 0);  //  * unchanged *
>
> # Why only once __builtin_prefetch used? Why not invoke in
> rte_memcpy_ge64 loop
> # Does it make sense to prefetch src + 64/128 * n

Prefetch is only necessary once at the beginning.  The CPU will do automatic incremental prefetching once the continuous memory accesses start.  It's not necessary to prefetch in the loop.  In fact, doing it in the loop will actually interfere with the CPU's HW prefetcher and degrade the performance.
IMPORTANT NOTICE: The contents of this email and any attachments are confidential and may also be privileged. If you are not the intended recipient, please notify the sender immediately and do not disclose the contents to any other person, use it for any purpose, or store or copy the information in any medium. Thank you.
  
Jerin Jacob Dec. 18, 2017, 4:17 a.m. UTC | #9
-----Original Message-----
> Date: Mon, 18 Dec 2017 02:51:19 +0000
> From: Herbert Guan <Herbert.Guan@arm.com>
> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> CC: Jianbo Liu <Jianbo.Liu@arm.com>, "dev@dpdk.org" <dev@dpdk.org>
> Subject: RE: [PATCH] arch/arm: optimization for memcpy on AArch64
> 
> Hi Jerin,

Hi Herbert,

> > >
> > > Here the value of '64' is not the cache line size.  But for the reason that
> > prefetch itself will cost some cycles, it's not worthwhile to do prefetch for
> > small size (e.g. < 64 bytes) copy.  Per my test, prefetching for small size copy
> > will actually lower the performance.
> >
> > But
> > I think, '64' is a function of cache size. ie. Any reason why we haven't used
> > rte_memcpy_ge16_lt128()/rte_memcpy_ge128 pair instead of
> > rte_memcpy_ge16_lt64//rte_memcpy_ge64 pair?
> > I think, if you can add one more conditional compilation to choose between
> > rte_memcpy_ge16_lt128()/rte_memcpy_ge128 vs
> > rte_memcpy_ge16_lt64//rte_memcpy_ge64,
> > will address the all arm64 variants supported in current DPDK.
> >
> 
> The logic for 128B cache is implemented as you've suggested, and has been added in V3 patch.
> 
> > >
> > > In the other hand, I can only find one 128B cache line aarch64 machine here.
> > And it do exist some specific optimization for this machine.  Not sure if it'll be
> > beneficial for other 128B cache machines or not.  I prefer not to put it in this
> > patch but in a later standalone specific patch for 128B cache machines.
> > >
> > > > > +__builtin_prefetch(src, 0, 0);  // rte_prefetch_non_temporal(src);
> > > > > +__builtin_prefetch(dst, 1, 0);  //  * unchanged *
> >
> > # Why only once __builtin_prefetch used? Why not invoke in
> > rte_memcpy_ge64 loop
> > # Does it make sense to prefetch src + 64/128 * n
> 
> Prefetch is only necessary once at the beginning.  CPU will do auto incremental prefetch when the continuous memory access starts.  It's not necessary to do prefetch in the loop.  In fact doing it in loop will actually break CPU's HW prefetch and degrade the performance.

Yes. But, the aarch64 specification does not mandate that all implementations have a HW prefetch
mechanism (i.e. it is IMPLEMENTATION DEFINED).
I think you have provided a good start for the memcpy implementation and we
can fine-tune it _later_ based on different micro-architectures.
Your v3 looks good.


> IMPORTANT NOTICE: The contents of this email and any attachments are confidential and may also be privileged. If you are not the intended recipient, please notify the sender immediately and do not disclose the contents to any other person, use it for any purpose, or store or copy the information in any medium. Thank you.

Please remove such notice from public mailing list.
  

Patch

diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
index b80d8ba..1f42b3c 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
@@ -42,6 +42,197 @@ 
 
 #include "generic/rte_memcpy.h"
 
+#ifdef RTE_ARCH_ARM64_MEMCPY
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+
+/*******************************************************************************
+ * The memory copy performance differs on different AArch64 micro-architectures.
+ * And the most recent glibc (e.g. 2.23 or later) can provide a better memcpy()
+ * performance compared to old glibc versions. It's always suggested to use a
+ * more recent glibc if possible, from which the entire system can get benefit.
+ *
+ * This implementation improves memory copy on some aarch64 micro-architectures,
+ * when an old glibc (e.g. 2.19, 2.17...) is being used. It is disabled by
+ * default and needs "RTE_ARCH_ARM64_MEMCPY" defined to activate. It's not
+ * always providing better performance than memcpy() so users need to run unit
+ * test "memcpy_perf_autotest" and customize parameters in customization section
+ * below for best performance.
+ *
+ * Compiler version will also impact the rte_memcpy() performance. It's observed
+ * on some platforms and with the same code, GCC 7.2.0 compiled binaries can
+ * provide better performance than GCC 4.8.5 compiled binaries.
+ ******************************************************************************/
+
+/**************************************
+ * Beginning of customization section
+ **************************************/
+#define ALIGNMENT_MASK 0x0F
+#ifndef RTE_ARCH_ARM64_MEMCPY_STRICT_ALIGN
+// Only src unalignment will be treated as unaligned copy
+#define IS_UNALIGNED_COPY(dst, src) ((uintptr_t)(dst) & ALIGNMENT_MASK)
+#else
+// Both dst and src unalignment will be treated as unaligned copy
+#define IS_UNALIGNED_COPY(dst, src) \
+		(((uintptr_t)(dst) | (uintptr_t)(src)) & ALIGNMENT_MASK)
+#endif
+
+
+// If copy size is larger than threshold, memcpy() will be used.
+// Run "memcpy_perf_autotest" to determine the proper threshold.
+#define ALIGNED_THRESHOLD       ((size_t)(0xffffffff))
+#define UNALIGNED_THRESHOLD     ((size_t)(0xffffffff))
+
+
+/**************************************
+ * End of customization section
+ **************************************/
+#ifdef RTE_TOOLCHAIN_GCC
+#if (GCC_VERSION < 50400)
+#warning "The GCC version is quite old, which may result in sub-optimal \
+performance of the compiled code. It is suggested that at least GCC 5.4.0 \
+be used."
+#endif
+#endif
+
+static inline void __attribute__ ((__always_inline__))
+rte_mov16(uint8_t *restrict dst, const uint8_t *restrict src)
+{
+	__int128 * restrict dst128 = (__int128 * restrict)dst;
+	const __int128 * restrict src128 = (const __int128 * restrict)src;
+	*dst128 = *src128;
+}
+
+static inline void __attribute__ ((__always_inline__))
+rte_mov32(uint8_t *restrict dst, const uint8_t *restrict src)
+{
+	__int128 * restrict dst128 = (__int128 * restrict)dst;
+	const __int128 * restrict src128 = (const __int128 * restrict)src;
+	dst128[0] = src128[0];
+	dst128[1] = src128[1];
+}
+
+static inline void __attribute__ ((__always_inline__))
+rte_mov48(uint8_t *restrict dst, const uint8_t *restrict src)
+{
+	__int128 * restrict dst128 = (__int128 * restrict)dst;
+	const __int128 * restrict src128 = (const __int128 * restrict)src;
+	dst128[0] = src128[0];
+	dst128[1] = src128[1];
+	dst128[2] = src128[2];
+}
+
+static inline void __attribute__ ((__always_inline__))
+rte_mov64(uint8_t *restrict dst, const uint8_t *restrict src)
+{
+	__int128 * restrict dst128 = (__int128 * restrict)dst;
+	const __int128 * restrict src128 = (const __int128 * restrict)src;
+	dst128[0] = src128[0];
+	dst128[1] = src128[1];
+	dst128[2] = src128[2];
+	dst128[3] = src128[3];
+}
+
+static inline void __attribute__ ((__always_inline__))
+rte_mov128(uint8_t *restrict dst, const uint8_t *restrict src)
+{
+	rte_mov64(dst, src);
+	rte_mov64(dst + 64, src + 64);
+}
+
+static inline void __attribute__ ((__always_inline__))
+rte_mov256(uint8_t *restrict dst, const uint8_t *restrict src)
+{
+	rte_mov128(dst, src);
+	rte_mov128(dst + 128, src + 128);
+}
+
+static inline void __attribute__ ((__always_inline__))
+rte_memcpy_lt16(uint8_t *restrict dst, const uint8_t *restrict src, size_t n)
+{
+	if (n & 0x08) {
+		/* copy 8 ~ 15 bytes */
+		*(uint64_t *)dst = *(const uint64_t *)src;
+		*(uint64_t *)(dst - 8 + n) = *(const uint64_t *)(src - 8 + n);
+	} else if (n & 0x04) {
+		/* copy 4 ~ 7 bytes */
+		*(uint32_t *)dst = *(const uint32_t *)src;
+		*(uint32_t *)(dst - 4 + n) = *(const uint32_t *)(src - 4 + n);
+	} else if (n & 0x02) {
+		/* copy 2 ~ 3 bytes */
+		*(uint16_t *)dst = *(const uint16_t *)src;
+		*(uint16_t *)(dst - 2 + n) = *(const uint16_t *)(src - 2 + n);
+	} else if (n & 0x01) {
+		/* copy 1 byte */
+		*dst = *src;
+	}
+}
+
+static inline void __attribute__ ((__always_inline__))
+rte_memcpy_ge16_lt64
+(uint8_t *restrict dst, const uint8_t *restrict src, size_t n)
+{
+	if (n == 16) {
+		rte_mov16(dst, src);
+	} else if (n <= 32) {
+		rte_mov16(dst, src);
+		rte_mov16(dst - 16 + n, src - 16 + n);
+	} else if (n <= 48) {
+		rte_mov32(dst, src);
+		rte_mov16(dst - 16 + n, src - 16 + n);
+	} else {
+		rte_mov48(dst, src);
+		rte_mov16(dst - 16 + n, src - 16 + n);
+	}
+}
+
+static inline void __attribute__ ((__always_inline__))
+rte_memcpy_ge64(uint8_t *restrict dst, const uint8_t *restrict src, size_t n)
+{
+	do {
+		rte_mov64(dst, src);
+		src += 64;
+		dst += 64;
+		n -= 64;
+	} while (likely(n >= 64));
+
+	if (likely(n)) {
+		if (n > 48)
+			rte_mov64(dst - 64 + n, src - 64 + n);
+		else if (n > 32)
+			rte_mov48(dst - 48 + n, src - 48 + n);
+		else if (n > 16)
+			rte_mov32(dst - 32 + n, src - 32 + n);
+		else
+			rte_mov16(dst - 16 + n, src - 16 + n);
+	}
+}
+
+static inline void *__attribute__ ((__always_inline__))
+rte_memcpy(void *restrict dst, const void *restrict src, size_t n)
+{
+	if (n < 16) {
+		rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
+		return dst;
+	}
+	if (n < 64) {
+		rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src, n);
+		return dst;
+	}
+	__builtin_prefetch(src, 0, 0);
+	__builtin_prefetch(dst, 1, 0);
+	if (likely(
+		  (!IS_UNALIGNED_COPY(dst, src) && n <= ALIGNED_THRESHOLD)
+		   || (IS_UNALIGNED_COPY(dst, src) && n <= UNALIGNED_THRESHOLD)
+		  )) {
+		rte_memcpy_ge64((uint8_t *)dst, (const uint8_t *)src, n);
+		return dst;
+	} else
+		return memcpy(dst, src, n);
+}
+
+
+#else
 static inline void
 rte_mov16(uint8_t *dst, const uint8_t *src)
 {
@@ -80,6 +271,8 @@ 
 
 #define rte_memcpy(d, s, n)	memcpy((d), (s), (n))
 
+#endif
+
 #ifdef __cplusplus
 }
 #endif