File force-elision-race.patch of Package glibc.22972

2018-10-17  Stefan Liebler  <stli@linux.ibm.com>

	[BZ #23275]
	* sysdeps/unix/sysv/linux/s390/force-elision.h (FORCE_ELISION):
	Ensure that the elision path is used if elision is available.
	* sysdeps/unix/sysv/linux/powerpc/force-elision.h (FORCE_ELISION):
	Likewise.
	* sysdeps/unix/sysv/linux/x86/force-elision.h (FORCE_ELISION):
	Likewise.
	* nptl/pthreadP.h (PTHREAD_MUTEX_TYPE, PTHREAD_MUTEX_TYPE_ELISION)
	(PTHREAD_MUTEX_PSHARED): Use atomic_load_relaxed.
	* nptl/pthread_mutex_consistent.c (pthread_mutex_consistent): Likewise.
	* nptl/pthread_mutex_getprioceiling.c (pthread_mutex_getprioceiling):
	Likewise.
	* nptl/pthread_mutex_lock.c (__pthread_mutex_lock_full)
	(__pthread_mutex_cond_lock_adjust): Likewise.
	* nptl/pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling):
	Likewise.
	* nptl/pthread_mutex_timedlock.c (__pthread_mutex_timedlock): Likewise.
	* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
	* sysdeps/nptl/bits/thread-shared-types.h (struct __pthread_mutex_s):
	Add comments.
	* nptl/pthread_mutex_destroy.c (__pthread_mutex_destroy):
	Use atomic_load_relaxed and atomic_store_relaxed.
	* nptl/pthread_mutex_init.c (__pthread_mutex_init):
	Use atomic_store_relaxed.

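The core of the fix is visible in the force-elision.h hunks below: the mutex
kind is read once with a relaxed atomic load, and the elision path is taken
whenever PTHREAD_MUTEX_ELISION_NP is set in that snapshot, no matter which
thread set it.  The following standalone sketch is not glibc code; the flag
values and the function name try_elision are illustrative stand-ins, and the
GCC __atomic builtins stand in for glibc's atomic_load_relaxed and
atomic_store_relaxed:

#include <stdio.h>

#define ELISION_NP     256   /* stand-in for PTHREAD_MUTEX_ELISION_NP */
#define NO_ELISION_NP  512   /* stand-in for PTHREAD_MUTEX_NO_ELISION_NP */
#define ELISION_FLAGS  (ELISION_NP | NO_ELISION_NP)

static int try_elision (int *kind)
{
  /* Load __kind once with relaxed MO; all later decisions use this
     local snapshot, so this thread cannot see two different values.  */
  int k = __atomic_load_n (kind, __ATOMIC_RELAXED);
  if ((k & ELISION_FLAGS) == 0)
    {
      /* First lock operation: enable elision.  Concurrent threads may
         store the same value; relaxed MO is enough.  */
      k |= ELISION_NP;
      __atomic_store_n (kind, k, __ATOMIC_RELAXED);
    }
  /* Crucially, take the elision path whenever ELISION_NP is set in the
     snapshot, no matter which thread set it.  The pre-patch code only
     took it when this thread had just set the flag.  */
  return (k & ELISION_NP) != 0;
}

int main (void)
{
  int kind = 0;
  printf ("first call uses elision:  %d\n", try_elision (&kind));
  printf ("second call uses elision: %d\n", try_elision (&kind));
  kind = NO_ELISION_NP;   /* as if set via pthread_mutexattr_settype */
  printf ("no-elision mutex:         %d\n", try_elision (&kind));
  return 0;
}

Running it prints 1, 1, 0: once elision is enabled (or explicitly disabled),
every later lock operation follows the same path.
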
Index: glibc-2.26/nptl/pthreadP.h
===================================================================
--- glibc-2.26.orig/nptl/pthreadP.h
+++ glibc-2.26/nptl/pthreadP.h
@@ -110,19 +110,23 @@ enum
 };
 #define PTHREAD_MUTEX_PSHARED_BIT 128
 
+/* See concurrency notes regarding __kind in struct __pthread_mutex_s
+   in sysdeps/nptl/bits/thread-shared-types.h.  */
 #define PTHREAD_MUTEX_TYPE(m) \
-  ((m)->__data.__kind & 127)
+  (atomic_load_relaxed (&((m)->__data.__kind)) & 127)
 /* Don't include NO_ELISION, as that type is always the same
    as the underlying lock type.  */
 #define PTHREAD_MUTEX_TYPE_ELISION(m) \
-  ((m)->__data.__kind & (127|PTHREAD_MUTEX_ELISION_NP))
+  (atomic_load_relaxed (&((m)->__data.__kind))	\
+   & (127 | PTHREAD_MUTEX_ELISION_NP))
 
 #if LLL_PRIVATE == 0 && LLL_SHARED == 128
 # define PTHREAD_MUTEX_PSHARED(m) \
-  ((m)->__data.__kind & 128)
+  (atomic_load_relaxed (&((m)->__data.__kind)) & 128)
 #else
 # define PTHREAD_MUTEX_PSHARED(m) \
-  (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE)
+  ((atomic_load_relaxed (&((m)->__data.__kind)) & 128)	\
+   ? LLL_SHARED : LLL_PRIVATE)
 #endif
 
 /* The kernel when waking robust mutexes on exit never uses
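
For reference, glibc's atomic_load_relaxed typically maps to a relaxed
compiler-builtin load when USE_ATOMIC_COMPILER_BUILTINS is available, so the
rewritten PTHREAD_MUTEX_TYPE amounts to the sketch below.  The values are
illustrative: PTHREAD_MUTEX_PSHARED_BIT is 128 as defined above, the low 7
bits hold the type, and 1 is PTHREAD_MUTEX_RECURSIVE_NP in glibc.

#include <stdio.h>

#define PSHARED_BIT 128   /* PTHREAD_MUTEX_PSHARED_BIT from above */

static int mutex_type (const int *kind)
{
  /* What PTHREAD_MUTEX_TYPE(m) now does: one relaxed atomic load of
     __kind, then mask off everything but the low 7 type bits.  */
  return __atomic_load_n (kind, __ATOMIC_RELAXED) & 127;
}

int main (void)
{
  /* Recursive type combined with the process-shared bit.  */
  int kind = 1 | PSHARED_BIT;
  printf ("type=%d shared=%d\n", mutex_type (&kind),
          (__atomic_load_n (&kind, __ATOMIC_RELAXED) & PSHARED_BIT) != 0);
  return 0;
}
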
Index: glibc-2.26/nptl/pthread_mutex_consistent.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_consistent.c
+++ glibc-2.26/nptl/pthread_mutex_consistent.c
@@ -23,8 +23,11 @@
 int
 pthread_mutex_consistent (pthread_mutex_t *mutex)
 {
-  /* Test whether this is a robust mutex with a dead owner.  */
-  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
+  /* Test whether this is a robust mutex with a dead owner.
+     See concurrency notes regarding __kind in struct __pthread_mutex_s
+     in sysdeps/nptl/bits/thread-shared-types.h.  */
+  if ((atomic_load_relaxed (&(mutex->__data.__kind))
+       & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
       || mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
     return EINVAL;
 
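pthread_mutex_consistent is only meaningful for a robust mutex whose owner
died; the hunk above merely turns the flag test on __kind into a relaxed
atomic load.  A minimal usage sketch of the API being guarded here
(single-process for brevity; the same protocol applies to process-shared
robust mutexes):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;

static void *die_with_lock (void *arg)
{
  pthread_mutex_lock (&m);
  return NULL;          /* exit without unlocking: the owner dies */
}

int main (void)
{
  pthread_mutexattr_t a;
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &a);

  pthread_t t;
  pthread_create (&t, NULL, die_with_lock, NULL);
  pthread_join (t, NULL);

  if (pthread_mutex_lock (&m) == EOWNERDEAD)
    {
      /* Repair the protected state, then mark the mutex usable.  */
      printf ("consistent -> %d\n", pthread_mutex_consistent (&m));
      pthread_mutex_unlock (&m);
    }
  /* On a non-robust mutex (ROBUST bit clear in __kind) the call fails
     with EINVAL, which is exactly the test patched above.  */
  pthread_mutex_t plain = PTHREAD_MUTEX_INITIALIZER;
  int c;
  printf ("plain -> EINVAL? %d\n",
          (c = pthread_mutex_consistent (&plain)) == EINVAL);
  return 0;
}
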
Index: glibc-2.26/nptl/pthread_mutex_destroy.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_destroy.c
+++ glibc-2.26/nptl/pthread_mutex_destroy.c
@@ -27,12 +27,17 @@ __pthread_mutex_destroy (pthread_mutex_t
 {
   LIBC_PROBE (mutex_destroy, 1, mutex);
 
-  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
+  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+     in sysdeps/nptl/bits/thread-shared-types.h.  */
+  if ((atomic_load_relaxed (&(mutex->__data.__kind))
+       & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
       && mutex->__data.__nusers != 0)
     return EBUSY;
 
-  /* Set to an invalid value.  */
-  mutex->__data.__kind = -1;
+  /* Set to an invalid value.  Relaxed MO is enough as it is undefined
+     behavior if the mutex is used after it has been destroyed.  It can,
+     however, be reinitialized with pthread_mutex_init.  */
+  atomic_store_relaxed (&(mutex->__data.__kind), -1);
 
   return 0;
 }
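
The relaxed store of -1 is sufficient because, as the new comment says, any
use after destruction is undefined; the only sanctioned follow-up is
reinitialization.  A small legal-usage sketch:

#include <pthread.h>
#include <stdio.h>

int main (void)
{
  pthread_mutex_t m;
  pthread_mutex_init (&m, NULL);
  pthread_mutex_lock (&m);
  pthread_mutex_unlock (&m);
  pthread_mutex_destroy (&m);
  /* Valid: a destroyed mutex may be initialized again.  */
  pthread_mutex_init (&m, NULL);
  pthread_mutex_destroy (&m);
  puts ("done");
  return 0;
}
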
Index: glibc-2.26/nptl/pthread_mutex_getprioceiling.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_getprioceiling.c
+++ glibc-2.26/nptl/pthread_mutex_getprioceiling.c
@@ -24,7 +24,9 @@
 int
 pthread_mutex_getprioceiling (const pthread_mutex_t *mutex, int *prioceiling)
 {
-  if (__builtin_expect ((mutex->__data.__kind
+  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+     in sysdeps/nptl/bits/thread-shared-types.h.  */
+  if (__builtin_expect ((atomic_load_relaxed (&(mutex->__data.__kind))
 			 & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0, 0))
     return EINVAL;
 
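The check above now reads __kind with a relaxed load before testing the
PTHREAD_MUTEX_PRIO_PROTECT_NP flag.  A usage sketch of the guarded API
(initializing a priority-protected mutex needs no elevated privileges;
locking one may):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;
  int ceiling = sched_get_priority_min (SCHED_FIFO);

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_PROTECT);
  pthread_mutexattr_setprioceiling (&attr, ceiling);
  pthread_mutex_init (&m, &attr);

  int got;
  if (pthread_mutex_getprioceiling (&m, &got) == 0)
    printf ("ceiling=%d\n", got);

  /* Without PTHREAD_MUTEX_PRIO_PROTECT_NP in __kind the call fails,
     which is the test patched above.  */
  pthread_mutex_t plain = PTHREAD_MUTEX_INITIALIZER;
  printf ("plain mutex fails? %d\n",
          pthread_mutex_getprioceiling (&plain, &got) != 0);
  pthread_mutex_destroy (&m);
  return 0;
}
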
Index: glibc-2.26/nptl/pthread_mutex_init.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_init.c
+++ glibc-2.26/nptl/pthread_mutex_init.c
@@ -87,7 +87,7 @@ __pthread_mutex_init (pthread_mutex_t *m
   memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);
 
   /* Copy the values from the attribute.  */
-  mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
+  int mutex_kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
 
   if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
     {
@@ -97,17 +97,17 @@ __pthread_mutex_init (pthread_mutex_t *m
 	return ENOTSUP;
 #endif
 
-      mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+      mutex_kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
     }
 
   switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
     {
     case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
-      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
+      mutex_kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
       break;
 
     case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
-      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;
+      mutex_kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;
 
       int ceiling = (imutexattr->mutexkind
 		     & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
@@ -131,7 +131,11 @@ __pthread_mutex_init (pthread_mutex_t *m
      FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
   if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
 				| PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
-    mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;
+    mutex_kind |= PTHREAD_MUTEX_PSHARED_BIT;
+
+  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+     in sysdeps/nptl/bits/thread-shared-types.h.  */
+  atomic_store_relaxed (&(mutex->__data.__kind), mutex_kind);
 
   /* Default values: mutex not used yet.  */
   // mutex->__count = 0;	already done by memset
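
Note the shape of the change: all flag bits are now collected in the local
mutex_kind and published with a single relaxed store, so a racing relaxed
reader of __kind can never observe a half-assembled value.  The pattern as a
standalone sketch, with stand-in flag values (16 and 128 match glibc's
PTHREAD_MUTEX_ROBUST_NORMAL_NP and PTHREAD_MUTEX_PSHARED_BIT, but treat them
as illustrative):

#include <stdio.h>

static void publish_kind (int *shared_kind, int base, int robust, int pshared)
{
  int k = base;                 /* user-requested mutex type */
  if (robust)
    k |= 16;                    /* stand-in: PTHREAD_MUTEX_ROBUST_NORMAL_NP */
  if (pshared)
    k |= 128;                   /* stand-in: PTHREAD_MUTEX_PSHARED_BIT */
  /* One relaxed store publishes the finished value; readers never see
     an intermediate |= step.  */
  __atomic_store_n (shared_kind, k, __ATOMIC_RELAXED);
}

int main (void)
{
  int kind = 0;
  publish_kind (&kind, 1, 1, 0);
  printf ("kind=%d\n", kind);   /* 17 = recursive | robust */
  return 0;
}
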
Index: glibc-2.26/nptl/pthread_mutex_lock.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_lock.c
+++ glibc-2.26/nptl/pthread_mutex_lock.c
@@ -64,6 +64,8 @@ __pthread_mutex_lock (pthread_mutex_t *m
 {
   assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
 
+  /* See concurrency notes regarding the mutex type, loaded from __kind
+     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
   unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
 
   LIBC_PROBE (mutex_entry, 1, mutex);
@@ -352,8 +354,14 @@ __pthread_mutex_lock_full (pthread_mutex
     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
       {
-	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
-	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	int kind, robust;
+	{
+	  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	     in sysdeps/nptl/bits/thread-shared-types.h.  */
+	  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
+	  kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
+	  robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	}
 
 	if (robust)
 	  {
@@ -504,7 +512,10 @@ __pthread_mutex_lock_full (pthread_mutex
     case PTHREAD_MUTEX_PP_NORMAL_NP:
     case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
       {
-	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+	/* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	   in sysdeps/nptl/bits/thread-shared-types.h.  */
+	int kind = atomic_load_relaxed (&(mutex->__data.__kind))
+	  & PTHREAD_MUTEX_KIND_MASK_NP;
 
 	oldval = mutex->__data.__lock;
 
@@ -610,15 +621,18 @@ void
 internal_function
 __pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
 {
-  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
-  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
-  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
+  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+     in sysdeps/nptl/bits/thread-shared-types.h.  */
+  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
+  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
+  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
+  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
 
   /* Record the ownership.  */
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
   mutex->__data.__owner = id;
 
-  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
+  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
     ++mutex->__data.__count;
 }
 #endif
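
In __pthread_mutex_cond_lock_adjust the three asserts and the final
comparison now all work on one local snapshot of __kind, so they are
guaranteed to judge the same value even if the field were changed
concurrently, and the compiler cannot re-read the shared field between the
tests.  A sketch of this load-once idiom (stand-in constants and a
hypothetical helper name):

#include <stdio.h>

#define TYPE_MASK  127   /* stand-in: low type bits */
#define ROBUST_BIT  16   /* stand-in: PTHREAD_MUTEX_ROBUST_NORMAL_NP */

static int recursive_not_robust (const int *kind)
{
  /* One relaxed load; both derived facts below come from the same
     snapshot of the shared field.  */
  int k = __atomic_load_n (kind, __ATOMIC_RELAXED);
  int is_recursive = (k & TYPE_MASK) == 1;   /* 1: recursive type */
  int is_robust = (k & ROBUST_BIT) != 0;
  return is_recursive && !is_robust;
}

int main (void)
{
  int kind = 1;
  printf ("%d\n", recursive_not_robust (&kind));   /* 1 */
  kind = 1 | ROBUST_BIT;
  printf ("%d\n", recursive_not_robust (&kind));   /* 0 */
  return 0;
}
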
Index: glibc-2.26/nptl/pthread_mutex_setprioceiling.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_setprioceiling.c
+++ glibc-2.26/nptl/pthread_mutex_setprioceiling.c
@@ -27,9 +27,10 @@ int
 pthread_mutex_setprioceiling (pthread_mutex_t *mutex, int prioceiling,
 			      int *old_ceiling)
 {
-  /* The low bits of __kind aren't ever changed after pthread_mutex_init,
-     so we don't need a lock yet.  */
-  if ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0)
+  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+     in sysdeps/nptl/bits/thread-shared-types.h.  */
+  if ((atomic_load_relaxed (&(mutex->__data.__kind))
+       & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0)
     return EINVAL;
 
   /* See __init_sched_fifo_prio.  */
Index: glibc-2.26/nptl/pthread_mutex_timedlock.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_timedlock.c
+++ glibc-2.26/nptl/pthread_mutex_timedlock.c
@@ -53,6 +53,8 @@ __pthread_mutex_timedlock (pthread_mutex
   /* We must not check ABSTIME here.  If the thread does not block
      abstime must not be checked for a valid value.  */
 
+  /* See concurrency notes regarding the mutex type, loaded from __kind
+     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
   switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
 			    PTHREAD_MUTEX_TIMED_NP))
     {
@@ -338,8 +340,14 @@ __pthread_mutex_timedlock (pthread_mutex
     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
       {
-	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
-	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	int kind, robust;
+	{
+	  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	     in sysdeps/nptl/bits/thread-shared-types.h.  */
+	  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
+	  kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
+	  robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	}
 
 	if (robust)
 	  {
@@ -509,7 +517,10 @@ __pthread_mutex_timedlock (pthread_mutex
     case PTHREAD_MUTEX_PP_NORMAL_NP:
     case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
       {
-	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+	/* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	   in sysdeps/nptl/bits/thread-shared-types.h.  */
+	int kind = atomic_load_relaxed (&(mutex->__data.__kind))
+	  & PTHREAD_MUTEX_KIND_MASK_NP;
 
 	oldval = mutex->__data.__lock;
 
Index: glibc-2.26/nptl/pthread_mutex_trylock.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_trylock.c
+++ glibc-2.26/nptl/pthread_mutex_trylock.c
@@ -36,6 +36,8 @@ __pthread_mutex_trylock (pthread_mutex_t
   int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
+  /* See concurrency notes regarding the mutex type, loaded from __kind
+     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
   switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
 			    PTHREAD_MUTEX_TIMED_NP))
     {
@@ -221,8 +223,14 @@ __pthread_mutex_trylock (pthread_mutex_t
     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
       {
-	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
-	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	int kind, robust;
+	{
+	  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	     in sysdeps/nptl/bits/thread-shared-types.h.  */
+	  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
+	  kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
+	  robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	}
 
 	if (robust)
 	  {
@@ -374,7 +382,10 @@ __pthread_mutex_trylock (pthread_mutex_t
     case PTHREAD_MUTEX_PP_NORMAL_NP:
     case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
       {
-	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+	/* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	   in sysdeps/nptl/bits/thread-shared-types.h.  */
+	int kind = atomic_load_relaxed (&(mutex->__data.__kind))
+	  & PTHREAD_MUTEX_KIND_MASK_NP;
 
 	oldval = mutex->__data.__lock;
 
Index: glibc-2.26/nptl/pthread_mutex_unlock.c
===================================================================
--- glibc-2.26.orig/nptl/pthread_mutex_unlock.c
+++ glibc-2.26/nptl/pthread_mutex_unlock.c
@@ -36,6 +36,8 @@ int
 internal_function attribute_hidden
 __pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
 {
+  /* See concurrency notes regarding the mutex type, loaded from __kind
+     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
   int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
   if (__builtin_expect (type &
 		~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
@@ -224,13 +226,19 @@ __pthread_mutex_unlock_full (pthread_mut
       /* If the previous owner died and the caller did not succeed in
 	 making the state consistent, mark the mutex as unrecoverable
 	 and make all waiters.  */
-      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
+      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	 in sysdeps/nptl/bits/thread-shared-types.h.  */
+      if ((atomic_load_relaxed (&(mutex->__data.__kind))
+	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
 	  && __builtin_expect (mutex->__data.__owner
 			       == PTHREAD_MUTEX_INCONSISTENT, 0))
       pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
 
-      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
+      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	 in sysdeps/nptl/bits/thread-shared-types.h.  */
+      if ((atomic_load_relaxed (&(mutex->__data.__kind))
+	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
 	{
 	continue_pi_robust:
 	  /* Remove mutex from the list.
@@ -253,7 +261,10 @@ __pthread_mutex_unlock_full (pthread_mut
       /* Unlock.  Load all necessary mutex data before releasing the mutex
 	 to not violate the mutex destruction requirements (see
 	 lll_unlock).  */
-      int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	 in sysdeps/nptl/bits/thread-shared-types.h.  */
+      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
+	& PTHREAD_MUTEX_ROBUST_NORMAL_NP;
       private = (robust
 		 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
 		 : PTHREAD_MUTEX_PSHARED (mutex));
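
The hunk above keeps an important ordering rule of the surrounding code:
everything the unlock path needs from the mutex (here the robust flag that
decides the futex's pshared mode) is loaded before the lock word is
released, because right after the release another thread may lock, unlock
and destroy (or unmap) the mutex.  A condensed sketch of the rule, with a
hypothetical toy_mutex type and stand-in flag value:

#include <stdio.h>

struct toy_mutex { int lock; int kind; };

static void toy_unlock (struct toy_mutex *m)
{
  /* Read all needed fields BEFORE the releasing store ...  */
  int robust = __atomic_load_n (&m->kind, __ATOMIC_RELAXED) & 16;
  /* ... release the lock ...  */
  __atomic_store_n (&m->lock, 0, __ATOMIC_RELEASE);
  /* ... and from here on never touch *m again: another thread may
     already have destroyed it.  Only the local copy is used.  */
  printf ("wake waiters, pshared=%s\n", robust ? "shared" : "private");
}

int main (void)
{
  struct toy_mutex m = { 1, 16 };
  toy_unlock (&m);
  return 0;
}
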
Index: glibc-2.26/sysdeps/nptl/bits/thread-shared-types.h
===================================================================
--- glibc-2.26.orig/sysdeps/nptl/bits/thread-shared-types.h
+++ glibc-2.26/sysdeps/nptl/bits/thread-shared-types.h
@@ -105,7 +105,27 @@ struct __pthread_mutex_s
   unsigned int __nusers;
 #endif
   /* KIND must stay at this position in the structure to maintain
-     binary compatibility with static initializers.  */
+     binary compatibility with static initializers.
+
+     Concurrency notes:
+     The __kind of a mutex is initialized either by the static
+     PTHREAD_MUTEX_INITIALIZER or by a call to pthread_mutex_init.
+
+     After a mutex has been initialized, the __kind of a mutex is usually not
+     changed.  But it can be set to -1 in pthread_mutex_destroy, or elision can
+     be enabled.  The latter is done concurrently in the pthread_mutex_*lock
+     functions by using the macro FORCE_ELISION.  This macro is only defined
+     for architectures which support lock elision.
+
+     For elision, there are the flags PTHREAD_MUTEX_ELISION_NP and
+     PTHREAD_MUTEX_NO_ELISION_NP, which can be set in addition to the type of
+     the mutex that is already set.
+     Before a mutex is initialized, only PTHREAD_MUTEX_NO_ELISION_NP can be set
+     with pthread_mutexattr_settype.
+     After a mutex has been initialized, the functions pthread_mutex_*lock can
+     enable elision - if the mutex type and the machine support it - by setting
+     the flag PTHREAD_MUTEX_ELISION_NP.  This is done concurrently.  Afterwards
+     the lock / unlock functions use specific elision code paths.  */
   int __kind;
   __PTHREAD_COMPAT_PADDING_MID
 #if __WORDSIZE == 64
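
The comment above describes a small state machine on two mutually exclusive
flag bits of __kind.  As a sketch (the values 256 and 512 match
PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP in nptl/pthreadP.h,
but treat them as illustrative here):

#include <stdio.h>

#define ELISION_NP    256
#define NO_ELISION_NP 512

static const char *elision_state (int kind)
{
  switch (kind & (ELISION_NP | NO_ELISION_NP))
    {
    case 0:             return "undecided: first lock may enable elision";
    case ELISION_NP:    return "elision enabled by some lock operation";
    case NO_ELISION_NP: return "elision disabled at initialization";
    default:            return "invalid: the flags are mutually exclusive";
    }
}

int main (void)
{
  printf ("%s\n", elision_state (0));
  printf ("%s\n", elision_state (ELISION_NP));
  printf ("%s\n", elision_state (NO_ELISION_NP));
  return 0;
}
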
Index: glibc-2.26/sysdeps/unix/sysv/linux/powerpc/force-elision.h
===================================================================
--- glibc-2.26.orig/sysdeps/unix/sysv/linux/powerpc/force-elision.h
+++ glibc-2.26/sysdeps/unix/sysv/linux/powerpc/force-elision.h
@@ -19,10 +19,46 @@
 #ifdef ENABLE_LOCK_ELISION
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 There are the following cases for the kind of a mutex		\
+	 (the mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP, of	\
+	 which at most one can be set):					\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision as it is not enabled so far.				\
+	 Note: It can happen that multiple threads are calling e.g.	\
+	 pthread_mutex_lock at the same time as the first lock		\
+	 operation for this mutex.  Then elision is enabled for this	\
+	 mutex by multiple threads.  Storing with relaxed MO is enough	\
+	 as all threads will store the same new value for the kind of	\
+	 the mutex.  But we have to ensure that the elision path is	\
+	 always used, regardless of whether elision was enabled by	\
+	 this thread or by another one.					\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
 #endif
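
To see why the restructured macro is race-free, the sketch below (stand-in
constants, plain C instead of the macro; build with gcc -pthread) runs the
fixed logic from two threads: whatever the interleaving, both threads end up
on the elision path, whereas the pre-patch code let a thread that merely
observed PTHREAD_MUTEX_ELISION_NP already set fall through to the
non-elision path (BZ #23275):

#include <pthread.h>
#include <stdio.h>

#define ELISION_NP    256   /* stand-in flag values */
#define NO_ELISION_NP 512

static int kind;            /* plays the role of mutex->__data.__kind */

static void *locker (void *result)
{
  /* The fixed FORCE_ELISION shape: one relaxed load, a conditional
     relaxed store, and the path decision taken from the snapshot.  */
  int k = __atomic_load_n (&kind, __ATOMIC_RELAXED);
  if ((k & (ELISION_NP | NO_ELISION_NP)) == 0)
    {
      k |= ELISION_NP;
      __atomic_store_n (&kind, k, __ATOMIC_RELAXED);
    }
  *(int *) result = (k & ELISION_NP) != 0;
  return NULL;
}

int main (void)
{
  pthread_t t1, t2;
  int p1, p2;
  pthread_create (&t1, NULL, locker, &p1);
  pthread_create (&t2, NULL, locker, &p2);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  /* Always prints 1 1: both threads take the elision path, never a
     mixed pair that could hang lock against unlock.  */
  printf ("%d %d\n", p1, p2);
  return 0;
}
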
Index: glibc-2.26/sysdeps/unix/sysv/linux/s390/force-elision.h
===================================================================
--- glibc-2.26.orig/sysdeps/unix/sysv/linux/s390/force-elision.h
+++ glibc-2.26/sysdeps/unix/sysv/linux/s390/force-elision.h
@@ -19,10 +19,46 @@
 #ifdef ENABLE_LOCK_ELISION
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 There are the following cases for the kind of a mutex		\
+	 (the mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP, of	\
+	 which at most one can be set):					\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision as it is not enabled so far.				\
+	 Note: It can happen that multiple threads are calling e.g.	\
+	 pthread_mutex_lock at the same time as the first lock		\
+	 operation for this mutex.  Then elision is enabled for this	\
+	 mutex by multiple threads.  Storing with relaxed MO is enough	\
+	 as all threads will store the same new value for the kind of	\
+	 the mutex.  But we have to ensure that the elision path is	\
+	 always used, regardless of whether elision was enabled by	\
+	 this thread or by another one.					\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
 #endif
Index: glibc-2.26/sysdeps/unix/sysv/linux/x86/force-elision.h
===================================================================
--- glibc-2.26.orig/sysdeps/unix/sysv/linux/x86/force-elision.h
+++ glibc-2.26/sysdeps/unix/sysv/linux/x86/force-elision.h
@@ -18,9 +18,45 @@
 
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 There are the following cases for the kind of a mutex		\
+	 (the mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP, of	\
+	 which at most one can be set):					\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision as it is not enabled so far.				\
+	 Note: It can happen that multiple threads are calling e.g.	\
+	 pthread_mutex_lock at the same time as the first lock		\
+	 operation for this mutex.  Then elision is enabled for this	\
+	 mutex by multiple threads.  Storing with relaxed MO is enough	\
+	 as all threads will store the same new value for the kind of	\
+	 the mutex.  But we have to ensure that the elision path is	\
+	 always used, regardless of whether elision was enabled by	\
+	 this thread or by another one.					\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }