V2 [PATCH] x86: Add thresholds for "rep movsb/stosb" to tunables

Message ID: 20200704120307.GA1117522@gmail.com
State: New
Series: V2 [PATCH] x86: Add thresholds for "rep movsb/stosb" to tunables

Commit Message

H.J. Lu via Libc-alpha July 4, 2020, 12:03 p.m.
On Fri, Jul 03, 2020 at 03:49:21PM -0400, Carlos O'Donell wrote:
> On 7/3/20 1:52 PM, H.J. Lu wrote:
> > Add x86_rep_movsb_threshold and x86_rep_stosb_threshold to tunables
> > to update thresholds for "rep movsb" and "rep stosb" at run-time.
> > 
> > Note that a user-specified threshold for "rep movsb" smaller than the
> > minimum threshold will be ignored.
> 
> Post v2 please. Almost there.
> 
> > ---
> >  manual/tunables.texi                          | 14 +++++++
> >  sysdeps/x86/cacheinfo.c                       | 20 ++++++++++
> >  sysdeps/x86/cpu-features.h                    |  4 ++
> >  sysdeps/x86/dl-cacheinfo.c                    | 38 +++++++++++++++++++
> >  sysdeps/x86/dl-tunables.list                  |  6 +++
> >  .../multiarch/memmove-vec-unaligned-erms.S    | 16 +-------
> >  .../multiarch/memset-vec-unaligned-erms.S     | 12 +-----
> >  7 files changed, 84 insertions(+), 26 deletions(-)
> > 
> > diff --git a/manual/tunables.texi b/manual/tunables.texi
> > index ec18b10834..61edd62425 100644
> > --- a/manual/tunables.texi
> > +++ b/manual/tunables.texi
> > @@ -396,6 +396,20 @@ to set threshold in bytes for non temporal store.
> >  This tunable is specific to i386 and x86-64.
> >  @end deftp
> >  
> > +@deftp Tunable glibc.cpu.x86_rep_movsb_threshold
> > +The @code{glibc.cpu.x86_rep_movsb_threshold} tunable allows the user
> > +to set threshold in bytes to start using "rep movsb".
> > +
> > +This tunable is specific to i386 and x86-64.
> > +@end deftp
> > +
> > +@deftp Tunable glibc.cpu.x86_rep_stosb_threshold
> > +The @code{glibc.cpu.x86_rep_stosb_threshold} tunable allows the user
> > +to set threshold in bytes to start using "rep stosb".
> > +
> > +This tunable is specific to i386 and x86-64.
> > +@end deftp
> > +
> >  @deftp Tunable glibc.cpu.x86_ibt
> >  The @code{glibc.cpu.x86_ibt} tunable allows the user to control how
> >  indirect branch tracking (IBT) should be enabled.  Accepted values are
> > diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
> > index 8c4c7f9972..bb536d96ef 100644
> > --- a/sysdeps/x86/cacheinfo.c
> > +++ b/sysdeps/x86/cacheinfo.c
> > @@ -41,6 +41,23 @@ long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
> >  /* Threshold to use non temporal store.  */
> >  long int __x86_shared_non_temporal_threshold attribute_hidden;
> >  
> > +/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
> > +   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
> > +   memcpy micro benchmark in glibc shows that 2KB is the approximate
> > +   value above which REP MOVSB becomes faster than SSE2 optimization
> > +   on processors with Enhanced REP MOVSB.  Since larger register size
> > +   can move more data with a single load and store, the threshold is
> > +   higher with larger register size.  */
> > +long int __x86_rep_movsb_threshold attribute_hidden = 2048;
> > +
> > +/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
> > +   up REP STOSB operation, REP STOSB isn't faster on short data.  The
> > +   memset micro benchmark in glibc shows that 2KB is the approximate
> > +   value above which REP STOSB becomes faster on processors with
> > +   Enhanced REP STOSB.  Since the stored value is fixed, larger register
> > +   size has minimal impact on threshold.  */
> > +long int __x86_rep_stosb_threshold attribute_hidden = 2048;
> > +
> >  #ifndef __x86_64__
> >  /* PREFETCHW support flag for use in memory and string routines.  */
> >  int __x86_prefetchw attribute_hidden;
> > @@ -117,6 +134,9 @@ init_cacheinfo (void)
> >    __x86_shared_non_temporal_threshold
> >      = cpu_features->non_temporal_threshold;
> >  
> > +  __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
> > +  __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
> > +
> 
> OK. Update global from cpu_features with values.
> 
> I would really like to see some kind of "assert (cpu_features->initialized);"
> that way we know we didn't break the startup sequence unintentionally.
> 
> >  #ifndef __x86_64__
> >    __x86_prefetchw = cpu_features->prefetchw;
> >  #endif
> > diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
> > index 3aaed33cbc..002e12e11f 100644
> > --- a/sysdeps/x86/cpu-features.h
> > +++ b/sysdeps/x86/cpu-features.h
> > @@ -128,6 +128,10 @@ struct cpu_features
> >    /* PREFETCHW support flag for use in memory and string routines.  */
> >    unsigned long int prefetchw;
> >  #endif
> > +  /* Threshold to use "rep movsb".  */
> > +  unsigned long int rep_movsb_threshold;
> > +  /* Threshold to use "rep stosb".  */
> > +  unsigned long int rep_stosb_threshold;
> 
> OK.
> 
> >  };
> >  
> >  /* Used from outside of glibc to get access to the CPU features
> > diff --git a/sysdeps/x86/dl-cacheinfo.c b/sysdeps/x86/dl-cacheinfo.c
> > index 8e2a6f552c..aff9bd1067 100644
> > --- a/sysdeps/x86/dl-cacheinfo.c
> > +++ b/sysdeps/x86/dl-cacheinfo.c
> > @@ -860,6 +860,31 @@ __init_cacheinfo (void)
> >       total shared cache size.  */
> >    unsigned long int non_temporal_threshold = (shared * threads * 3 / 4);
> >  
> > +  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
> > +  unsigned long int minimum_rep_movsb_threshold;
> > +  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  See
> > +     comments for __x86_rep_movsb_threshold in cacheinfo.c.  */
> > +  unsigned long int rep_movsb_threshold;
> > +  if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
> > +      && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> > +    {
> > +      rep_movsb_threshold = 2048 * (64 / 16);
> > +      minimum_rep_movsb_threshold = 64 * 8;
> > +    }
> > +  else if (CPU_FEATURES_ARCH_P (cpu_features,
> > +				AVX_Fast_Unaligned_Load))
> > +    {
> > +      rep_movsb_threshold = 2048 * (32 / 16);
> > +      minimum_rep_movsb_threshold = 32 * 8;
> > +    }
> > +  else
> > +    {
> > +      rep_movsb_threshold = 2048 * (16 / 16);
> > +      minimum_rep_movsb_threshold = 16 * 8;
> > +    }
> > +  /* NB: See comments for __x86_rep_stosb_threshold in cacheinfo.c.  */
> > +  unsigned long int rep_stosb_threshold = 2048;
> > +
> >  #if HAVE_TUNABLES
> >    long int tunable_size;
> >    tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
> > @@ -871,11 +896,19 @@ __init_cacheinfo (void)
> >    tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> >    if (tunable_size != 0)
> >      non_temporal_threshold = tunable_size;
> 
> > +  tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
> > +  if (tunable_size > minimum_rep_movsb_threshold)
> > +    rep_movsb_threshold = tunable_size;
> 
> OK. Good, we only set rep_movsb_threshold if it's greater than min.
> 
> > +  tunable_size = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
> > +  if (tunable_size != 0)
> > +    rep_stosb_threshold = tunable_size;
> 
> This should be min=1, default=2048 in dl-tunables.list, and would remove
> this code since the range is not dynamic.
> 
> The point of the tunables framework is to remove such boilerplate for
> range and default processing and clearing parameters for security settings.
> 
> >  #endif
> >  
> >    cpu_features->data_cache_size = data;
> >    cpu_features->shared_cache_size = shared;
> >    cpu_features->non_temporal_threshold = non_temporal_threshold;
> > +  cpu_features->rep_movsb_threshold = rep_movsb_threshold;
> > +  cpu_features->rep_stosb_threshold = rep_stosb_threshold;
> >  
> >  #if HAVE_TUNABLES
> >    TUNABLE_UPDATE (x86_data_cache_size, long int,
> > @@ -884,5 +917,10 @@ __init_cacheinfo (void)
> >  		  shared, 0, (long int) -1);
> >    TUNABLE_UPDATE (x86_non_temporal_threshold, long int,
> >  		  non_temporal_threshold, 0, (long int) -1);
> > +  TUNABLE_UPDATE (x86_rep_movsb_threshold, long int,
> > +		  rep_movsb_threshold, minimum_rep_movsb_threshold,
> > +		  (long int) -1);
> 
> OK. Store the new value and the computed minimum.
> 
> > +  TUNABLE_UPDATE (x86_rep_stosb_threshold, long int,
> > +		  rep_stosb_threshold, 0, (long int) -1);
> 
> This one can be deleted.
> 

We should go with this simple one for 2.32.

H.J.
---
Add x86_rep_movsb_threshold and x86_rep_stosb_threshold to tunables
to update thresholds for "rep movsb" and "rep stosb" at run-time.

Note that a user-specified threshold for "rep movsb" smaller than
the minimum threshold will be ignored.
---
 manual/tunables.texi                          | 14 ++++++
 sysdeps/x86/cacheinfo.c                       | 46 +++++++++++++++++++
 sysdeps/x86/cpu-features.c                    |  4 ++
 sysdeps/x86/cpu-features.h                    |  4 ++
 sysdeps/x86/dl-tunables.list                  |  6 +++
 .../multiarch/memmove-vec-unaligned-erms.S    | 16 +------
 .../multiarch/memset-vec-unaligned-erms.S     | 12 +----
 7 files changed, 76 insertions(+), 26 deletions(-)

-- 
2.26.2
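
Run-time tunables like these are set through the GLIBC_TUNABLES
environment variable as colon-separated name=value pairs.  A
hypothetical invocation (threshold values chosen purely for
illustration, with ./app standing in for any program):

    # Raise the "rep movsb" threshold to 8KB and the "rep stosb"
    # threshold to 4KB for this one run.
    GLIBC_TUNABLES=glibc.cpu.x86_rep_movsb_threshold=8192:glibc.cpu.x86_rep_stosb_threshold=4096 ./app

As the commit message notes, a "rep movsb" value below the computed
minimum threshold is ignored.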

Comments

Carlos O'Donell via Libc-alpha July 6, 2020, 12:59 p.m. | #1
On 7/4/20 8:03 AM, H.J. Lu wrote:
> On Fri, Jul 03, 2020 at 03:49:21PM -0400, Carlos O'Donell wrote:
>> On 7/3/20 1:52 PM, H.J. Lu wrote:
>>> Add x86_rep_movsb_threshold and x86_rep_stosb_threshold to tunables
>>> to update thresholds for "rep movsb" and "rep stosb" at run-time.
>>>
>>> Note that a user-specified threshold for "rep movsb" smaller than the
>>> minimum threshold will be ignored.
>>
>> Post v2 please. Almost there.
>>
>>> ---
>>>  manual/tunables.texi                          | 14 +++++++
>>>  sysdeps/x86/cacheinfo.c                       | 20 ++++++++++
>>>  sysdeps/x86/cpu-features.h                    |  4 ++
>>>  sysdeps/x86/dl-cacheinfo.c                    | 38 +++++++++++++++++++
>>>  sysdeps/x86/dl-tunables.list                  |  6 +++
>>>  .../multiarch/memmove-vec-unaligned-erms.S    | 16 +-------
>>>  .../multiarch/memset-vec-unaligned-erms.S     | 12 +-----
>>>  7 files changed, 84 insertions(+), 26 deletions(-)
>>>
>>> diff --git a/manual/tunables.texi b/manual/tunables.texi
>>> index ec18b10834..61edd62425 100644
>>> --- a/manual/tunables.texi
>>> +++ b/manual/tunables.texi
>>> @@ -396,6 +396,20 @@ to set threshold in bytes for non temporal store.
>>>  This tunable is specific to i386 and x86-64.
>>>  @end deftp
>>>  
>>> +@deftp Tunable glibc.cpu.x86_rep_movsb_threshold
>>> +The @code{glibc.cpu.x86_rep_movsb_threshold} tunable allows the user
>>> +to set threshold in bytes to start using "rep movsb".
>>> +
>>> +This tunable is specific to i386 and x86-64.
>>> +@end deftp
>>> +
>>> +@deftp Tunable glibc.cpu.x86_rep_stosb_threshold
>>> +The @code{glibc.cpu.x86_rep_stosb_threshold} tunable allows the user
>>> +to set threshold in bytes to start using "rep stosb".
>>> +
>>> +This tunable is specific to i386 and x86-64.
>>> +@end deftp
>>> +
>>>  @deftp Tunable glibc.cpu.x86_ibt
>>>  The @code{glibc.cpu.x86_ibt} tunable allows the user to control how
>>>  indirect branch tracking (IBT) should be enabled.  Accepted values are
>>> diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
>>> index 8c4c7f9972..bb536d96ef 100644
>>> --- a/sysdeps/x86/cacheinfo.c
>>> +++ b/sysdeps/x86/cacheinfo.c
>>> @@ -41,6 +41,23 @@ long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
>>>  /* Threshold to use non temporal store.  */
>>>  long int __x86_shared_non_temporal_threshold attribute_hidden;
>>>  
>>> +/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
>>> +   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
>>> +   memcpy micro benchmark in glibc shows that 2KB is the approximate
>>> +   value above which REP MOVSB becomes faster than SSE2 optimization
>>> +   on processors with Enhanced REP MOVSB.  Since larger register size
>>> +   can move more data with a single load and store, the threshold is
>>> +   higher with larger register size.  */
>>> +long int __x86_rep_movsb_threshold attribute_hidden = 2048;
>>> +
>>> +/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
>>> +   up REP STOSB operation, REP STOSB isn't faster on short data.  The
>>> +   memset micro benchmark in glibc shows that 2KB is the approximate
>>> +   value above which REP STOSB becomes faster on processors with
>>> +   Enhanced REP STOSB.  Since the stored value is fixed, larger register
>>> +   size has minimal impact on threshold.  */
>>> +long int __x86_rep_stosb_threshold attribute_hidden = 2048;
>>> +
>>>  #ifndef __x86_64__
>>>  /* PREFETCHW support flag for use in memory and string routines.  */
>>>  int __x86_prefetchw attribute_hidden;
>>> @@ -117,6 +134,9 @@ init_cacheinfo (void)
>>>    __x86_shared_non_temporal_threshold
>>>      = cpu_features->non_temporal_threshold;
>>>  
>>> +  __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
>>> +  __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
>>> +
>>
>> OK. Update global from cpu_features with values.
>>
>> I would really like to see some kind of "assert (cpu_features->initialized);"
>> that way we know we didn't break the startup sequence unintentionally.
>>
>>>  #ifndef __x86_64__
>>>    __x86_prefetchw = cpu_features->prefetchw;
>>>  #endif
>>> diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
>>> index 3aaed33cbc..002e12e11f 100644
>>> --- a/sysdeps/x86/cpu-features.h
>>> +++ b/sysdeps/x86/cpu-features.h
>>> @@ -128,6 +128,10 @@ struct cpu_features
>>>    /* PREFETCHW support flag for use in memory and string routines.  */
>>>    unsigned long int prefetchw;
>>>  #endif
>>> +  /* Threshold to use "rep movsb".  */
>>> +  unsigned long int rep_movsb_threshold;
>>> +  /* Threshold to use "rep stosb".  */
>>> +  unsigned long int rep_stosb_threshold;
>>
>> OK.
>>
>>>  };
>>>  
>>>  /* Used from outside of glibc to get access to the CPU features
>>> diff --git a/sysdeps/x86/dl-cacheinfo.c b/sysdeps/x86/dl-cacheinfo.c
>>> index 8e2a6f552c..aff9bd1067 100644
>>> --- a/sysdeps/x86/dl-cacheinfo.c
>>> +++ b/sysdeps/x86/dl-cacheinfo.c
>>> @@ -860,6 +860,31 @@ __init_cacheinfo (void)
>>>       total shared cache size.  */
>>>    unsigned long int non_temporal_threshold = (shared * threads * 3 / 4);
>>>  
>>> +  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
>>> +  unsigned long int minimum_rep_movsb_threshold;
>>> +  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  See
>>> +     comments for __x86_rep_movsb_threshold in cacheinfo.c.  */
>>> +  unsigned long int rep_movsb_threshold;
>>> +  if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
>>> +      && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
>>> +    {
>>> +      rep_movsb_threshold = 2048 * (64 / 16);
>>> +      minimum_rep_movsb_threshold = 64 * 8;
>>> +    }
>>> +  else if (CPU_FEATURES_ARCH_P (cpu_features,
>>> +				AVX_Fast_Unaligned_Load))
>>> +    {
>>> +      rep_movsb_threshold = 2048 * (32 / 16);
>>> +      minimum_rep_movsb_threshold = 32 * 8;
>>> +    }
>>> +  else
>>> +    {
>>> +      rep_movsb_threshold = 2048 * (16 / 16);
>>> +      minimum_rep_movsb_threshold = 16 * 8;
>>> +    }
>>> +  /* NB: See comments for __x86_rep_stosb_threshold in cacheinfo.c.  */
>>> +  unsigned long int rep_stosb_threshold = 2048;
>>> +
>>>  #if HAVE_TUNABLES
>>>    long int tunable_size;
>>>    tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
>>> @@ -871,11 +896,19 @@ __init_cacheinfo (void)
>>>    tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
>>>    if (tunable_size != 0)
>>>      non_temporal_threshold = tunable_size;
>>
>>> +  tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
>>> +  if (tunable_size > minimum_rep_movsb_threshold)
>>> +    rep_movsb_threshold = tunable_size;
>>
>> OK. Good, we only set rep_movsb_threshold if it's greater than min.
>>
>>> +  tunable_size = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
>>> +  if (tunable_size != 0)
>>> +    rep_stosb_threshold = tunable_size;
>>
>> This should be min=1, default=2048 in dl-tunables.list, and would remove
>> this code since the range is not dynamic.
>>
>> The point of the tunables framework is to remove such boilerplate for
>> range and default processing and clearing parameters for security settings.
>>
>>>  #endif
>>>  
>>>    cpu_features->data_cache_size = data;
>>>    cpu_features->shared_cache_size = shared;
>>>    cpu_features->non_temporal_threshold = non_temporal_threshold;
>>> +  cpu_features->rep_movsb_threshold = rep_movsb_threshold;
>>> +  cpu_features->rep_stosb_threshold = rep_stosb_threshold;
>>>  
>>>  #if HAVE_TUNABLES
>>>    TUNABLE_UPDATE (x86_data_cache_size, long int,
>>> @@ -884,5 +917,10 @@ __init_cacheinfo (void)
>>>  		  shared, 0, (long int) -1);
>>>    TUNABLE_UPDATE (x86_non_temporal_threshold, long int,
>>>  		  non_temporal_threshold, 0, (long int) -1);
>>> +  TUNABLE_UPDATE (x86_rep_movsb_threshold, long int,
>>> +		  rep_movsb_threshold, minimum_rep_movsb_threshold,
>>> +		  (long int) -1);
>>
>> OK. Store the new value and the computed minimum.
>>
>>> +  TUNABLE_UPDATE (x86_rep_stosb_threshold, long int,
>>> +		  rep_stosb_threshold, 0, (long int) -1);
>>
>> This one can be deleted.
>>
> 
> We should go with this simple one for 2.32.

I agree.

We can make this better in 2.33.

Please post V3 of this patch for final review.

See my notes below on how this is intended to be handled by the tunables framework.

> H.J.
> ---
> Add x86_rep_movsb_threshold and x86_rep_stosb_threshold to tunables
> to update thresholds for "rep movsb" and "rep stosb" at run-time.
> 
> Note that a user-specified threshold for "rep movsb" smaller than
> the minimum threshold will be ignored.
> ---
>  manual/tunables.texi                          | 14 ++++++
>  sysdeps/x86/cacheinfo.c                       | 46 +++++++++++++++++++
>  sysdeps/x86/cpu-features.c                    |  4 ++
>  sysdeps/x86/cpu-features.h                    |  4 ++
>  sysdeps/x86/dl-tunables.list                  |  6 +++
>  .../multiarch/memmove-vec-unaligned-erms.S    | 16 +------
>  .../multiarch/memset-vec-unaligned-erms.S     | 12 +----
>  7 files changed, 76 insertions(+), 26 deletions(-)
> 
> diff --git a/manual/tunables.texi b/manual/tunables.texi
> index ec18b10834..61edd62425 100644
> --- a/manual/tunables.texi
> +++ b/manual/tunables.texi
> @@ -396,6 +396,20 @@ to set threshold in bytes for non temporal store.
>  This tunable is specific to i386 and x86-64.
>  @end deftp
>  
> +@deftp Tunable glibc.cpu.x86_rep_movsb_threshold
> +The @code{glibc.cpu.x86_rep_movsb_threshold} tunable allows the user
> +to set threshold in bytes to start using "rep movsb".

Add: "The value must be greater than zero, and currently defaults to 2048 bytes."

> +
> +This tunable is specific to i386 and x86-64.
> +@end deftp
> +
> +@deftp Tunable glibc.cpu.x86_rep_stosb_threshold
> +The @code{glibc.cpu.x86_rep_stosb_threshold} tunable allows the user
> +to set threshold in bytes to start using "rep stosb".

Add: "The value must be greater than zero, and currently defaults to 2048 bytes."

> +
> +This tunable is specific to i386 and x86-64.
> +@end deftp

OK. Docs addition required and added here.

> +
>  @deftp Tunable glibc.cpu.x86_ibt
>  The @code{glibc.cpu.x86_ibt} tunable allows the user to control how
>  indirect branch tracking (IBT) should be enabled.  Accepted values are
> diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
> index 311502dee3..4322328a1b 100644
> --- a/sysdeps/x86/cacheinfo.c
> +++ b/sysdeps/x86/cacheinfo.c
> @@ -530,6 +530,23 @@ long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
>  /* Threshold to use non temporal store.  */
>  long int __x86_shared_non_temporal_threshold attribute_hidden;
>  
> +/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
> +   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
> +   memcpy micro benchmark in glibc shows that 2KB is the approximate
> +   value above which REP MOVSB becomes faster than SSE2 optimization
> +   on processors with Enhanced REP MOVSB.  Since larger register size
> +   can move more data with a single load and store, the threshold is
> +   higher with larger register size.  */

Comments should move to dl-tunables.list.

> +long int __x86_rep_movsb_threshold attribute_hidden = 2048;
> +
> +/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
> +   up REP STOSB operation, REP STOSB isn't faster on short data.  The
> +   memset micro benchmark in glibc shows that 2KB is the approximate
> +   value above which REP STOSB becomes faster on processors with
> +   Enhanced REP STOSB.  Since the stored value is fixed, larger register
> +   size has minimal impact on threshold.  */

Comments should move to dl-tunables.list.

> +long int __x86_rep_stosb_threshold attribute_hidden = 2048;

Are these globals used *before* tunables is initialized?

That should be the only case that would require us to set the default here.

Otherwise we should remove the default and express it in dl-tunables.list.

> +
>  #ifndef DISABLE_PREFETCHW
>  /* PREFETCHW support flag for use in memory and string routines.  */
>  int __x86_prefetchw attribute_hidden;
> @@ -872,6 +889,35 @@ init_cacheinfo (void)
>      = (cpu_features->non_temporal_threshold != 0
>         ? cpu_features->non_temporal_threshold
>         : __x86_shared_cache_size * threads * 3 / 4);
> +
> +  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
> +  unsigned int minimum_rep_movsb_threshold;
> +  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  */
> +  unsigned int rep_movsb_threshold;
> +  if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
> +      && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> +    {
> +      rep_movsb_threshold = 2048 * (64 / 16);
> +      minimum_rep_movsb_threshold = 64 * 8;
> +    }
> +  else if (CPU_FEATURES_ARCH_P (cpu_features,
> +				AVX_Fast_Unaligned_Load))
> +    {
> +      rep_movsb_threshold = 2048 * (32 / 16);
> +      minimum_rep_movsb_threshold = 32 * 8;
> +    }
> +  else
> +    {
> +      rep_movsb_threshold = 2048 * (16 / 16);
> +      minimum_rep_movsb_threshold = 16 * 8;
> +    }
> +  if (cpu_features->rep_movsb_threshold > minimum_rep_movsb_threshold)
> +    __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
> +  else
> +    __x86_rep_movsb_threshold = rep_movsb_threshold;

OK, use the cpu_features value, otherwise the computed threshold.

> +

OK.

> +  if (cpu_features->rep_stosb_threshold)
> +    __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;

This code becomes:

__x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;

Because the tunables code ensured the threshold was > 0; otherwise
an invalid tunable would leave the default value of 2048.

>  }
>  
>  #endif
> diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
> index c351bdd54a..c7673a2eb9 100644
> --- a/sysdeps/x86/cpu-features.c
> +++ b/sysdeps/x86/cpu-features.c
> @@ -606,6 +606,10 @@ no_cpuid:
>    TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
>    cpu_features->non_temporal_threshold
>      = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> +  cpu_features->rep_movsb_threshold
> +    = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
> +  cpu_features->rep_stosb_threshold
> +    = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);

OK.

>    cpu_features->data_cache_size
>      = TUNABLE_GET (x86_data_cache_size, long int, NULL);
>    cpu_features->shared_cache_size
> diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
> index d66dc206f7..39d2b59d63 100644
> --- a/sysdeps/x86/cpu-features.h
> +++ b/sysdeps/x86/cpu-features.h
> @@ -102,6 +102,10 @@ struct cpu_features
>    unsigned long int shared_cache_size;
>    /* Threshold to use non temporal store.  */
>    unsigned long int non_temporal_threshold;
> +  /* Threshold to use "rep movsb".  */
> +  unsigned long int rep_movsb_threshold;
> +  /* Threshold to use "rep stosb".  */
> +  unsigned long int rep_stosb_threshold;

OK.

>  };
>  
>  /* Used from outside of glibc to get access to the CPU features
> diff --git a/sysdeps/x86/dl-tunables.list b/sysdeps/x86/dl-tunables.list
> index 251b926ce4..43bf6c2389 100644
> --- a/sysdeps/x86/dl-tunables.list
> +++ b/sysdeps/x86/dl-tunables.list
> @@ -30,6 +30,12 @@ glibc {
>      x86_non_temporal_threshold {
>        type: SIZE_T
>      }
> +    x86_rep_movsb_threshold {
> +      type: SIZE_T

Add "minimum: 1"
Add "default: 2048"
Add comments about why it's 2048.

> +    }
> +    x86_rep_stosb_threshold {
> +      type: SIZE_T

Add "minimum: 1"
Add "default: 2048"
Add comments about why it's 2048.

> +    }
>      x86_data_cache_size {
>        type: SIZE_T
>      }
> diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> index 74953245aa..bd5dc1a3f3 100644
> --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> @@ -56,17 +56,6 @@
>  # endif
>  #endif
>  
> -/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
> -   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
> -   memcpy micro benchmark in glibc shows that 2KB is the approximate
> -   value above which REP MOVSB becomes faster than SSE2 optimization
> -   on processors with Enhanced REP MOVSB.  Since larger register size
> -   can move more data with a single load and store, the threshold is
> -   higher with larger register size.  */
> -#ifndef REP_MOVSB_THRESHOLD
> -# define REP_MOVSB_THRESHOLD	(2048 * (VEC_SIZE / 16))
> -#endif

OK.

> -
>  #ifndef PREFETCH
>  # define PREFETCH(addr) prefetcht0 addr
>  #endif
> @@ -253,9 +242,6 @@ L(movsb):
>  	leaq	(%rsi,%rdx), %r9
>  	cmpq	%r9, %rdi
>  	/* Avoid slow backward REP MOVSB.  */
> -# if REP_MOVSB_THRESHOLD <= (VEC_SIZE * 8)
> -#  error Unsupported REP_MOVSB_THRESHOLD and VEC_SIZE!
> -# endif

OK.

>  	jb	L(more_8x_vec_backward)
>  1:
>  	mov	%RDX_LP, %RCX_LP
> @@ -331,7 +317,7 @@ L(between_2_3):
>  
>  #if defined USE_MULTIARCH && IS_IN (libc)
>  L(movsb_more_2x_vec):
> -	cmpq	$REP_MOVSB_THRESHOLD, %rdx
> +	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP

OK.

>  	ja	L(movsb)
>  #endif
>  L(more_2x_vec):
> diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> index af2299709c..2bfc95de05 100644
> --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> @@ -58,16 +58,6 @@
>  # endif
>  #endif
>  
> -/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
> -   up REP STOSB operation, REP STOSB isn't faster on short data.  The
> -   memset micro benchmark in glibc shows that 2KB is the approximate
> -   value above which REP STOSB becomes faster on processors with
> -   Enhanced REP STOSB.  Since the stored value is fixed, larger register
> -   size has minimal impact on threshold.  */
> -#ifndef REP_STOSB_THRESHOLD
> -# define REP_STOSB_THRESHOLD		2048
> -#endif

OK.

> -
>  #ifndef SECTION
>  # error SECTION is not defined!
>  #endif
> @@ -181,7 +171,7 @@ ENTRY (MEMSET_SYMBOL (__memset, unaligned_erms))
>  	ret
>  
>  L(stosb_more_2x_vec):
> -	cmpq	$REP_STOSB_THRESHOLD, %rdx
> +	cmp	__x86_rep_stosb_threshold(%rip), %RDX_LP

OK.

>  	ja	L(stosb)
>  #endif
>  L(more_2x_vec):
> 

-- 
Cheers,
Carlos.
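
Carlos's dl-tunables.list suggestion would look roughly like the
sketch below.  The "minval" and "default" keys are an assumption
borrowed from how other glibc tunables lists spell ranges (Carlos
writes "minimum:" above); the exact keys and comment wording would be
settled in V3:

    x86_rep_movsb_threshold {
      type: SIZE_T
      # Since there is overhead to set up REP MOVSB operation, REP MOVSB
      # isn't faster on short data, so default to the 2KB crossover
      # measured by the glibc memcpy microbenchmark.
      minval: 1
      default: 2048
    }
    x86_rep_stosb_threshold {
      type: SIZE_T
      # REP STOSB has similar setup overhead; the glibc memset
      # microbenchmark puts its crossover near 2KB as well.
      minval: 1
      default: 2048
    }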

Patch

diff --git a/manual/tunables.texi b/manual/tunables.texi
index ec18b10834..61edd62425 100644
--- a/manual/tunables.texi
+++ b/manual/tunables.texi
@@ -396,6 +396,20 @@  to set threshold in bytes for non temporal store.
 This tunable is specific to i386 and x86-64.
 @end deftp
 
+@deftp Tunable glibc.cpu.x86_rep_movsb_threshold
+The @code{glibc.cpu.x86_rep_movsb_threshold} tunable allows the user
+to set threshold in bytes to start using "rep movsb".
+
+This tunable is specific to i386 and x86-64.
+@end deftp
+
+@deftp Tunable glibc.cpu.x86_rep_stosb_threshold
+The @code{glibc.cpu.x86_rep_stosb_threshold} tunable allows the user
+to set threshold in bytes to start using "rep stosb".
+
+This tunable is specific to i386 and x86-64.
+@end deftp
+
 @deftp Tunable glibc.cpu.x86_ibt
 The @code{glibc.cpu.x86_ibt} tunable allows the user to control how
 indirect branch tracking (IBT) should be enabled.  Accepted values are
diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
index 311502dee3..4322328a1b 100644
--- a/sysdeps/x86/cacheinfo.c
+++ b/sysdeps/x86/cacheinfo.c
@@ -530,6 +530,23 @@  long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
 /* Threshold to use non temporal store.  */
 long int __x86_shared_non_temporal_threshold attribute_hidden;
 
+/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
+   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
+   memcpy micro benchmark in glibc shows that 2KB is the approximate
+   value above which REP MOVSB becomes faster than SSE2 optimization
+   on processors with Enhanced REP MOVSB.  Since larger register size
+   can move more data with a single load and store, the threshold is
+   higher with larger register size.  */
+long int __x86_rep_movsb_threshold attribute_hidden = 2048;
+
+/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
+   up REP STOSB operation, REP STOSB isn't faster on short data.  The
+   memset micro benchmark in glibc shows that 2KB is the approximate
+   value above which REP STOSB becomes faster on processors with
+   Enhanced REP STOSB.  Since the stored value is fixed, larger register
+   size has minimal impact on threshold.  */
+long int __x86_rep_stosb_threshold attribute_hidden = 2048;
+
 #ifndef DISABLE_PREFETCHW
 /* PREFETCHW support flag for use in memory and string routines.  */
 int __x86_prefetchw attribute_hidden;
@@ -872,6 +889,35 @@  init_cacheinfo (void)
     = (cpu_features->non_temporal_threshold != 0
        ? cpu_features->non_temporal_threshold
        : __x86_shared_cache_size * threads * 3 / 4);
+
+  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
+  unsigned int minimum_rep_movsb_threshold;
+  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  */
+  unsigned int rep_movsb_threshold;
+  if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
+      && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
+    {
+      rep_movsb_threshold = 2048 * (64 / 16);
+      minimum_rep_movsb_threshold = 64 * 8;
+    }
+  else if (CPU_FEATURES_ARCH_P (cpu_features,
+				AVX_Fast_Unaligned_Load))
+    {
+      rep_movsb_threshold = 2048 * (32 / 16);
+      minimum_rep_movsb_threshold = 32 * 8;
+    }
+  else
+    {
+      rep_movsb_threshold = 2048 * (16 / 16);
+      minimum_rep_movsb_threshold = 16 * 8;
+    }
+  if (cpu_features->rep_movsb_threshold > minimum_rep_movsb_threshold)
+    __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
+  else
+    __x86_rep_movsb_threshold = rep_movsb_threshold;
+
+  if (cpu_features->rep_stosb_threshold)
+    __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
 }
 
 #endif
diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index c351bdd54a..c7673a2eb9 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -606,6 +606,10 @@  no_cpuid:
   TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
   cpu_features->non_temporal_threshold
     = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
+  cpu_features->rep_movsb_threshold
+    = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
+  cpu_features->rep_stosb_threshold
+    = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
   cpu_features->data_cache_size
     = TUNABLE_GET (x86_data_cache_size, long int, NULL);
   cpu_features->shared_cache_size
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index d66dc206f7..39d2b59d63 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -102,6 +102,10 @@  struct cpu_features
   unsigned long int shared_cache_size;
   /* Threshold to use non temporal store.  */
   unsigned long int non_temporal_threshold;
+  /* Threshold to use "rep movsb".  */
+  unsigned long int rep_movsb_threshold;
+  /* Threshold to use "rep stosb".  */
+  unsigned long int rep_stosb_threshold;
 };
 
 /* Used from outside of glibc to get access to the CPU features
diff --git a/sysdeps/x86/dl-tunables.list b/sysdeps/x86/dl-tunables.list
index 251b926ce4..43bf6c2389 100644
--- a/sysdeps/x86/dl-tunables.list
+++ b/sysdeps/x86/dl-tunables.list
@@ -30,6 +30,12 @@  glibc {
     x86_non_temporal_threshold {
       type: SIZE_T
     }
+    x86_rep_movsb_threshold {
+      type: SIZE_T
+    }
+    x86_rep_stosb_threshold {
+      type: SIZE_T
+    }
     x86_data_cache_size {
       type: SIZE_T
     }
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index 74953245aa..bd5dc1a3f3 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -56,17 +56,6 @@ 
 # endif
 #endif
 
-/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
-   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
-   memcpy micro benchmark in glibc shows that 2KB is the approximate
-   value above which REP MOVSB becomes faster than SSE2 optimization
-   on processors with Enhanced REP MOVSB.  Since larger register size
-   can move more data with a single load and store, the threshold is
-   higher with larger register size.  */
-#ifndef REP_MOVSB_THRESHOLD
-# define REP_MOVSB_THRESHOLD	(2048 * (VEC_SIZE / 16))
-#endif
-
 #ifndef PREFETCH
 # define PREFETCH(addr) prefetcht0 addr
 #endif
@@ -253,9 +242,6 @@  L(movsb):
 	leaq	(%rsi,%rdx), %r9
 	cmpq	%r9, %rdi
 	/* Avoid slow backward REP MOVSB.  */
-# if REP_MOVSB_THRESHOLD <= (VEC_SIZE * 8)
-#  error Unsupported REP_MOVSB_THRESHOLD and VEC_SIZE!
-# endif
 	jb	L(more_8x_vec_backward)
 1:
 	mov	%RDX_LP, %RCX_LP
@@ -331,7 +317,7 @@  L(between_2_3):
 
 #if defined USE_MULTIARCH && IS_IN (libc)
 L(movsb_more_2x_vec):
-	cmpq	$REP_MOVSB_THRESHOLD, %rdx
+	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP
 	ja	L(movsb)
 #endif
 L(more_2x_vec):
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index af2299709c..2bfc95de05 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -58,16 +58,6 @@ 
 # endif
 #endif
 
-/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
-   up REP STOSB operation, REP STOSB isn't faster on short data.  The
-   memset micro benchmark in glibc shows that 2KB is the approximate
-   value above which REP STOSB becomes faster on processors with
-   Enhanced REP STOSB.  Since the stored value is fixed, larger register
-   size has minimal impact on threshold.  */
-#ifndef REP_STOSB_THRESHOLD
-# define REP_STOSB_THRESHOLD		2048
-#endif
-
 #ifndef SECTION
 # error SECTION is not defined!
 #endif
@@ -181,7 +171,7 @@  ENTRY (MEMSET_SYMBOL (__memset, unaligned_erms))
 	ret
 
 L(stosb_more_2x_vec):
-	cmpq	$REP_STOSB_THRESHOLD, %rdx
+	cmp	__x86_rep_stosb_threshold(%rip), %RDX_LP
 	ja	L(stosb)
 #endif
 L(more_2x_vec):
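
For reference, the defaults and minimums computed by init_cacheinfo
work out as follows (the arithmetic is taken directly from the
selection logic in the patch):

    AVX-512 usable, not Prefer_No_AVX512 (VEC_SIZE = 64):
        default = 2048 * (64 / 16) = 8192, minimum = 64 * 8 = 512
    AVX_Fast_Unaligned_Load (VEC_SIZE = 32):
        default = 2048 * (32 / 16) = 4096, minimum = 32 * 8 = 256
    SSE2 baseline (VEC_SIZE = 16):
        default = 2048 * (16 / 16) = 2048, minimum = 16 * 8 = 128

The "rep stosb" default stays at 2048 for every vector size because
the stored value is fixed, so wider registers barely move the
crossover point.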