author	Joseph Myers <joseph@codesourcery.com>	2019-02-06 17:16:43 +0000
committer	Joseph Myers <joseph@codesourcery.com>	2019-02-06 17:16:43 +0000
commit	c2d8f0b704c2b828bcd8d517a2376c0240c73c09
tree	f60739031829eefd27fb10d61369617efa79f515
parent	3b935595859e0232b74594c5aca6da88a31f90b3
Avoid "inline" after return type in function definitions.
One group of warnings seen with -Wextra flags static or inline
keywords that do not come at the start of a declaration
(-Wold-style-declaration).

This patch fixes various such cases for inline, ensuring it comes at
the start of the declaration (after any static).  A common case of the
fix is "static inline <type> __always_inline"; the definition of
__always_inline starts with __inline, so the natural change is to
"static __always_inline <type>".  Other cases of the warning may be
harder to fix (one pattern is a function definition that gets
rewritten to be static by an including file, via "#define funcname
static wrapped_funcname" or similar), but the inline cases seem worth
fixing anyway.
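
As a minimal sketch of the transformation, assuming a hypothetical
function do_set_example (not one touched by this patch) and glibc's
__always_inline macro from <sys/cdefs.h>, which expands to __inline
plus the always_inline attribute, the warning-producing form

    #include <stddef.h>      /* size_t */
    #include <sys/cdefs.h>   /* __always_inline */

    /* Before: __always_inline expands to __inline plus the
       always_inline attribute, so an inline keyword lands after the
       return type and -Wold-style-declaration fires under -Wextra.  */
    static inline int
    __always_inline
    do_set_example (size_t value)
    {
      return 1;
    }

becomes

    /* After: static and the (expanded) inline both precede the return
       type; the warning is gone and the meaning is unchanged.  */
    static __always_inline int
    do_set_example (size_t value)
    {
      return 1;
    }

The harder pattern mentioned above looks roughly like this (funcname
and wrapped_funcname are again hypothetical names): a definition
written as

    int
    funcname (void)
    {
      return 0;
    }

is included by a file that first does "#define funcname static
wrapped_funcname", so the expanded definition reads "int static
wrapped_funcname (void)", with static after the return type.  That
cannot be fixed inside the definition itself, which is why only the
inline cases are changed here.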

Tested for x86_64.

	* elf/dl-load.h (_dl_postprocess_loadcmd): Use __always_inline
	before return type, without separate inline.
	* elf/dl-tunables.c (maybe_enable_malloc_check): Likewise.
	* elf/dl-tunables.h (tunable_is_name): Likewise.
	* malloc/malloc.c (do_set_trim_threshold): Likewise.
	(do_set_top_pad): Likewise.
	(do_set_mmap_threshold): Likewise.
	(do_set_mmaps_max): Likewise.
	(do_set_mallopt_check): Likewise.
	(do_set_perturb_byte): Likewise.
	(do_set_arena_test): Likewise.
	(do_set_arena_max): Likewise.
	(do_set_tcache_max): Likewise.
	(do_set_tcache_count): Likewise.
	(do_set_tcache_unsorted_limit): Likewise.
	* nis/nis_subr.c (count_dots): Likewise.
	* nptl/allocatestack.c (advise_stack_range): Likewise.
	* sysdeps/ieee754/dbl-64/s_sin.c (do_cos): Likewise.
	(do_sin): Likewise.
	(reduce_sincos): Likewise.
	(do_sincos): Likewise.
	* sysdeps/unix/sysv/linux/x86/elision-conf.c
	(do_set_elision_enable): Likewise.
	(TUNABLE_CALLBACK_FNDECL): Likewise.
-rw-r--r--	ChangeLog	27
-rw-r--r--	elf/dl-load.h	2
-rw-r--r--	elf/dl-tunables.c	3
-rw-r--r--	elf/dl-tunables.h	3
-rw-r--r--	malloc/malloc.c	33
-rw-r--r--	nis/nis_subr.c	2
-rw-r--r--	nptl/allocatestack.c	3
-rw-r--r--	sysdeps/ieee754/dbl-64/s_sin.c	12
-rw-r--r--	sysdeps/unix/sysv/linux/x86/elision-conf.c	6
9 files changed, 49 insertions(+), 42 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 66fa654f70..c143073ca7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,30 @@
+2019-02-06  Joseph Myers  <joseph@codesourcery.com>
+
+	* elf/dl-load.h (_dl_postprocess_loadcmd): Use __always_inline
+	before return type, without separate inline.
+	* elf/dl-tunables.c (maybe_enable_malloc_check): Likewise.
+	* elf/dl-tunables.h (tunable_is_name): Likewise.
+	* malloc/malloc.c (do_set_trim_threshold): Likewise.
+	(do_set_top_pad): Likewise.
+	(do_set_mmap_threshold): Likewise.
+	(do_set_mmaps_max): Likewise.
+	(do_set_mallopt_check): Likewise.
+	(do_set_perturb_byte): Likewise.
+	(do_set_arena_test): Likewise.
+	(do_set_arena_max): Likewise.
+	(do_set_tcache_max): Likewise.
+	(do_set_tcache_count): Likewise.
+	(do_set_tcache_unsorted_limit): Likewise.
+	* nis/nis_subr.c (count_dots): Likewise.
+	* nptl/allocatestack.c (advise_stack_range): Likewise.
+	* sysdeps/ieee754/dbl-64/s_sin.c (do_cos): Likewise.
+	(do_sin): Likewise.
+	(reduce_sincos): Likewise.
+	(do_sincos): Likewise.
+	* sysdeps/unix/sysv/linux/x86/elision-conf.c
+	(do_set_elision_enable): Likewise.
+	(TUNABLE_CALLBACK_FNDECL): Likewise.
+
 2019-02-06  Florian Weimer  <fweimer@redhat.com>
 
 	* support/xdlfcn.c (xdlopen, xdlclose): Do not call dlerror.
diff --git a/elf/dl-load.h b/elf/dl-load.h
index 22954c1807..dddbcb8575 100644
--- a/elf/dl-load.h
+++ b/elf/dl-load.h
@@ -83,7 +83,7 @@ struct loadcmd
 /* This is a subroutine of _dl_map_segments.  It should be called for each
    load command, some time after L->l_addr has been set correctly.  It is
    responsible for setting up the l_text_end and l_phdr fields.  */
-static void __always_inline
+static __always_inline void
 _dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
                          const struct loadcmd *c)
 {
diff --git a/elf/dl-tunables.c b/elf/dl-tunables.c
index 542e837832..b0980c5ad9 100644
--- a/elf/dl-tunables.c
+++ b/elf/dl-tunables.c
@@ -272,8 +272,7 @@ parse_tunables (char *tunestr, char *valstring)
    for setuid binaries.  We use the special version of access() to avoid
    setting ERRNO, which is a TLS variable since TLS has not yet been set
    up.  */
-static inline void
-__always_inline
+static __always_inline void
 maybe_enable_malloc_check (void)
 {
   tunable_id_t id = TUNABLE_ENUM_NAME (glibc, malloc, check);
diff --git a/elf/dl-tunables.h b/elf/dl-tunables.h
index a72a1dd8f8..58b3b76843 100644
--- a/elf/dl-tunables.h
+++ b/elf/dl-tunables.h
@@ -113,8 +113,7 @@ rtld_hidden_proto (__tunable_get_val)
 # define TUNABLES_FRONTEND_yes TUNABLES_FRONTEND_valstring
 
 /* Compare two name strings, bounded by the name hardcoded in glibc.  */
-static inline bool
-__always_inline
+static __always_inline bool
 tunable_is_name (const char *orig, const char *envname)
 {
   for (;*orig != '\0' && *envname != '\0'; envname++, orig++)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 13fc1f2049..6e766d11bc 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -5019,8 +5019,7 @@ __malloc_stats (void)
 /*
    ------------------------------ mallopt ------------------------------
  */
-static inline int
-__always_inline
+static __always_inline int
 do_set_trim_threshold (size_t value)
 {
   LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
@@ -5030,8 +5029,7 @@ do_set_trim_threshold (size_t value)
   return 1;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_top_pad (size_t value)
 {
   LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
@@ -5041,8 +5039,7 @@ do_set_top_pad (size_t value)
   return 1;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_mmap_threshold (size_t value)
 {
   /* Forbid setting the threshold too high.  */
@@ -5057,8 +5054,7 @@ do_set_mmap_threshold (size_t value)
   return 0;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_mmaps_max (int32_t value)
 {
   LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
@@ -5068,15 +5064,13 @@ do_set_mmaps_max (int32_t value)
   return 1;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_mallopt_check (int32_t value)
 {
   return 1;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_perturb_byte (int32_t value)
 {
   LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
@@ -5084,8 +5078,7 @@ do_set_perturb_byte (int32_t value)
   return 1;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_arena_test (size_t value)
 {
   LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
@@ -5093,8 +5086,7 @@ do_set_arena_test (size_t value)
   return 1;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_arena_max (size_t value)
 {
   LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
@@ -5103,8 +5095,7 @@ do_set_arena_max (size_t value)
 }
 
 #if USE_TCACHE
-static inline int
-__always_inline
+static __always_inline int
 do_set_tcache_max (size_t value)
 {
   if (value >= 0 && value <= MAX_TCACHE_SIZE)
@@ -5116,8 +5107,7 @@ do_set_tcache_max (size_t value)
   return 1;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_tcache_count (size_t value)
 {
   LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
@@ -5125,8 +5115,7 @@ do_set_tcache_count (size_t value)
   return 1;
 }
 
-static inline int
-__always_inline
+static __always_inline int
 do_set_tcache_unsorted_limit (size_t value)
 {
   LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
diff --git a/nis/nis_subr.c b/nis/nis_subr.c
index 299fa27cf7..88a5e3ce5a 100644
--- a/nis/nis_subr.c
+++ b/nis/nis_subr.c
@@ -91,7 +91,7 @@ nis_name_of_r (const_nis_name name, char *buffer, size_t buflen)
 }
 libnsl_hidden_nolink_def (nis_name_of_r, GLIBC_2_1)
 
-static int __always_inline
+static __always_inline int
 count_dots (const_nis_name str)
 {
   int count = 0;
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 590350647b..d8e8570a7d 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -379,8 +379,7 @@ setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
 
 /* Mark the memory of the stack as usable to the kernel.  It frees everything
    except for the space used for the TCB itself.  */
-static inline void
-__always_inline
+static __always_inline void
 advise_stack_range (void *mem, size_t size, uintptr_t pd, size_t guardsize)
 {
   uintptr_t sp = (uintptr_t) CURRENT_STACK_FRAME;
diff --git a/sysdeps/ieee754/dbl-64/s_sin.c b/sysdeps/ieee754/dbl-64/s_sin.c
index 7584afcd2b..26799f1909 100644
--- a/sysdeps/ieee754/dbl-64/s_sin.c
+++ b/sysdeps/ieee754/dbl-64/s_sin.c
@@ -97,8 +97,7 @@ int __branred (double x, double *a, double *aa);
    of the number by combining the sin and cos of X (as computed by a variation
    of the Taylor series) with the values looked up from the sin/cos table to
    get the result.  */
-static inline double
-__always_inline
+static __always_inline double
 do_cos (double x, double dx)
 {
   mynumber u;
@@ -122,8 +121,7 @@ do_cos (double x, double dx)
    the number by combining the sin and cos of X (as computed by a variation of
    the Taylor series) with the values looked up from the sin/cos table to get
    the result.  */
-static inline double
-__always_inline
+static __always_inline double
 do_sin (double x, double dx)
 {
   double xold = x;
@@ -151,8 +149,7 @@ do_sin (double x, double dx)
    is written to *a, the low part to *da.  Range reduction is accurate to 136
    bits so that when x is large and *a very close to zero, all 53 bits of *a
    are correct.  */
-static inline int4
-__always_inline
+static __always_inline int4
 reduce_sincos (double x, double *a, double *da)
 {
   mynumber v;
@@ -178,8 +175,7 @@ reduce_sincos (double x, double *a, double *da)
 }
 
 /* Compute sin or cos (A + DA) for the given quadrant N.  */
-static double
-__always_inline
+static __always_inline double
 do_sincos (double a, double da, int4 n)
 {
   double retval;
diff --git a/sysdeps/unix/sysv/linux/x86/elision-conf.c b/sysdeps/unix/sysv/linux/x86/elision-conf.c
index 56cdc6d15b..6ba93daa0f 100644
--- a/sysdeps/unix/sysv/linux/x86/elision-conf.c
+++ b/sysdeps/unix/sysv/linux/x86/elision-conf.c
@@ -56,8 +56,7 @@ struct elision_config __elision_aconf =
 int __pthread_force_elision attribute_hidden = 0;
 
 #if HAVE_TUNABLES
-static inline void
-__always_inline
+static __always_inline void
 do_set_elision_enable (int32_t elision_enable)
 {
   /* Enable elision if it's avaliable in hardware. It's not necessary to check
@@ -79,8 +78,7 @@ TUNABLE_CALLBACK (set_elision_enable) (tunable_val_t *valp)
 }
 
 #define TUNABLE_CALLBACK_FNDECL(__name, __type)			\
-static inline void						\
-__always_inline							\
+static __always_inline void					\
 do_set_elision_ ## __name (__type value)			\
 {								\
   __elision_aconf.__name = value;				\