File malloc-Fix-list_lock-arena-lock-deadlock-BZ-19182.patch of Package glibc.4899

2015-12-21  Florian Weimer  <fweimer@redhat.com>

	[BZ #19182]
	* malloc/arena.c (list_lock): Document lock ordering requirements.
	(free_list_lock): New lock.
	(ptmalloc_lock_all): Comment on free_list_lock.
	(ptmalloc_unlock_all2): Reinitialize free_list_lock.
	(detach_arena): Update comment.  free_list_lock is now needed.
	(_int_new_arena): Use free_list_lock around detach_arena call.
	Acquire arena lock after list_lock.  Add comment, including FIXME
	about incorrect synchronization.
	(get_free_list): Switch to free_list_lock.
	(reused_arena): Acquire free_list_lock around detach_arena call
	and attached threads counter update.  Add two FIXMEs about
	incorrect synchronization.
	(arena_thread_freeres): Switch to free_list_lock.
	* malloc/malloc.c (struct malloc_state): Update comments to
	mention free_list_lock.

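The deadlock in a nutshell: before this change, _int_new_arena locked
the freshly created arena's mutex and only then took list_lock, while
the fork prepare handler ptmalloc_lock_all takes list_lock first and
then every arena mutex.  Two threads acquiring the same pair of locks
in opposite orders can deadlock.  The standalone C program below is an
illustrative reduction, not part of the patch; the lock and function
names are stand-ins for the glibc-internal ones.  Left running, it
eventually hangs with each thread blocked on the lock the other holds.

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;

/* Old _int_new_arena ordering: the new arena's own lock was
   acquired first, then list_lock.  */
static void *
new_arena_thread (void *ignored)
{
  for (;;)
    {
      pthread_mutex_lock (&arena_lock);   /* A */
      pthread_mutex_lock (&list_lock);    /* then B */
      pthread_mutex_unlock (&list_lock);
      pthread_mutex_unlock (&arena_lock);
    }
  return NULL;
}

/* ptmalloc_lock_all (fork prepare handler) ordering: list_lock
   first, then each arena lock.  */
static void *
fork_prepare_thread (void *ignored)
{
  for (;;)
    {
      pthread_mutex_lock (&list_lock);    /* B */
      pthread_mutex_lock (&arena_lock);   /* then A: AB/BA inversion */
      pthread_mutex_unlock (&arena_lock);
      pthread_mutex_unlock (&list_lock);
    }
  return NULL;
}

int
main (void)
{
  pthread_t t1, t2;
  pthread_create (&t1, NULL, new_arena_thread, NULL);
  pthread_create (&t2, NULL, fork_prepare_thread, NULL);
  pthread_join (t1, NULL);   /* never returns once the threads deadlock */
  pthread_join (t2, NULL);
  return 0;
}

The patch breaks the cycle by taking list_lock before the new arena's
lock in _int_new_arena and by moving the free_list and
attached_threads bookkeeping out from under list_lock into the new
free_list_lock, which is only ever taken as the innermost lock.
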
Index: glibc-2.19/malloc/arena.c
===================================================================
--- glibc-2.19.orig/malloc/arena.c
+++ glibc-2.19/malloc/arena.c
@@ -75,10 +75,23 @@ extern int sanity_check_heap_info_alignm
 /* Thread specific data */
 
 static tsd_key_t arena_key;
-static mutex_t list_lock = MUTEX_INITIALIZER;
+static mutex_t free_list_lock = MUTEX_INITIALIZER;
 static size_t narenas = 1;
 static mstate free_list;
 
+/* list_lock prevents concurrent writes to the next member of struct
+   malloc_state objects.
+
+   Read access to the next member is supposed to synchronize with the
+   atomic_write_barrier and the write to the next member in
+   _int_new_arena.  This suffers from data races; see the FIXME
+   comments in _int_new_arena and reused_arena.
+
+   list_lock also prevents concurrent forks.  When list_lock is
+   acquired, no arena lock must be acquired, but it is permitted to
+   acquire arena locks after list_lock.  */
+static mutex_t list_lock = MUTEX_INITIALIZER;
+
 #if THREAD_STATS
 static int stat_n_heaps;
 # define THREAD_STAT(x) x
@@ -225,6 +238,9 @@ ptmalloc_lock_all (void)
   if (__malloc_initialized < 1)
     return;
 
+  /* We do not acquire free_list_lock here because we completely
+     reconstruct free_list in ptmalloc_unlock_all2.  */
+
   if (mutex_trylock (&list_lock))
     {
       void *my_arena;
@@ -300,6 +316,7 @@ ptmalloc_unlock_all2 (void)
 
   /* Push all arenas to the free list, except save_arena, which is
      attached to the current thread.  */
+  mutex_init (&free_list_lock);
   if (save_arena != NULL)
     ((mstate) save_arena)->attached_threads = 1;
   free_list = NULL;
@@ -317,6 +334,7 @@ ptmalloc_unlock_all2 (void)
       if (ar_ptr == &main_arena)
         break;
     }
+
   mutex_init (&list_lock);
   atfork_recursive_cntr = 0;
 }
@@ -751,7 +769,7 @@ heap_trim (heap_info *heap, size_t pad)
 /* Create a new arena with initial size "size".  */
 
 /* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
-   called while list_lock is held.  */
+   called while free_list_lock is held.  */
 static void
 detach_arena (mstate replaced_arena)
 {
@@ -805,19 +823,34 @@ _int_new_arena (size_t size)
   arena_lookup (replaced_arena);
   tsd_setspecific (arena_key, (void *) a);
   mutex_init (&a->mutex);
-  (void) mutex_lock (&a->mutex);
 
   (void) mutex_lock (&list_lock);
 
-  detach_arena (replaced_arena);
-
   /* Add the new arena to the global list.  */
   a->next = main_arena.next;
+  /* FIXME: The barrier is an attempt to synchronize with read access
+     in reused_arena, which does not acquire list_lock while
+     traversing the list.  */
   atomic_write_barrier ();
   main_arena.next = a;
 
   (void) mutex_unlock (&list_lock);
 
+  (void) mutex_lock (&free_list_lock);
+  detach_arena (replaced_arena);
+  (void) mutex_unlock (&free_list_lock);
+
+  /* Lock this arena.  NB: Another thread may have been attached to
+     this arena because the arena is now accessible from the
+     main_arena.next list and could have been picked by reused_arena.
+     This can only happen for the last arena created (before the arena
+     limit is reached).  At this point, some arena has to be attached
+     to two threads.  We could acquire the arena lock before list_lock
+     to make it less likely that reused_arena picks this new arena,
+     but this could result in a deadlock with ptmalloc_lock_all.  */
+
+  (void) mutex_lock (&a->mutex);
+
   THREAD_STAT (++(a->stat_lock_loop));
 
   return a;
@@ -834,7 +867,7 @@ get_free_list (void)
   mstate result = free_list;
   if (result != NULL)
     {
-      (void) mutex_lock (&list_lock);
+      (void) mutex_lock (&free_list_lock);
       result = free_list;
       if (result != NULL)
 	{
@@ -845,7 +878,7 @@ get_free_list (void)
 
 	  detach_arena (replaced_arena);
 	}
-      (void) mutex_unlock (&list_lock);
+      (void) mutex_unlock (&free_list_lock);
 
       if (result != NULL)
         {
@@ -866,6 +899,7 @@ static mstate
 reused_arena (mstate avoid_arena)
 {
   mstate result;
+  /* FIXME: Access to next_to_use suffers from data races.  */
   static mstate next_to_use;
   if (next_to_use == NULL)
     next_to_use = &main_arena;
@@ -878,6 +912,7 @@ reused_arena (mstate avoid_arena)
       if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
         goto out;
 
+      /* FIXME: This is a data race, see _int_new_arena.  */
       result = result->next;
     }
   while (result != next_to_use);
@@ -909,12 +944,13 @@ out:
   /* Attach the arena to the current thread.  Note that we may have
      selected an arena which was on free_list.  */
   {
+    /* Update the arena thread attachment counters.   */
     mstate replaced_arena;
     arena_lookup (replaced_arena);
-    (void) mutex_lock (&list_lock);
+    (void) mutex_lock (&free_list_lock);
     detach_arena (replaced_arena);
     ++result->attached_threads;
-    (void) mutex_unlock (&list_lock);
+    (void) mutex_unlock (&free_list_lock);
   }
 
   LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
@@ -1010,7 +1046,7 @@ arena_thread_freeres (void)
 
   if (a != NULL)
     {
-      (void) mutex_lock (&list_lock);
+      (void) mutex_lock (&free_list_lock);
       /* If this was the last attached thread for this arena, put the
 	 arena on the free list.  */
       assert (a->attached_threads > 0);
@@ -1019,7 +1055,7 @@ arena_thread_freeres (void)
 	  a->next_free = free_list;
 	  free_list = a;
 	}
-      (void) mutex_unlock (&list_lock);
+      (void) mutex_unlock (&free_list_lock);
     }
 }
 text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
Index: glibc-2.19/malloc/malloc.c
===================================================================
--- glibc-2.19.orig/malloc/malloc.c
+++ glibc-2.19/malloc/malloc.c
@@ -1707,12 +1707,12 @@ struct malloc_state
   struct malloc_state *next;
 
   /* Linked list for free arenas.  Access to this field is serialized
-     by list_lock in arena.c.  */
+     by free_list_lock in arena.c.  */
   struct malloc_state *next_free;
 
   /* Number of threads attached to this arena.  0 if the arena is on
-     the free list.  Access to this field is serialized by list_lock
-     in arena.c.  */
+     the free list.  Access to this field is serialized by
+     free_list_lock in arena.c.  */
   INTERNAL_SIZE_T attached_threads;
 
   /* Memory allocated from the system in this arena.  */