musl: apply bug-fix patches

- patches from https://www.openwall.com/lists/musl/2020/05/22/10
- issue explained in https://www.openwall.com/lists/musl/2020/05/22/3

commit fde8c166a7, parent e09ca70969

5 changed files with 267 additions and 1 deletion
@@ -0,0 +1,56 @@
From 4d5aa20a94a2d3fae3e69289dc23ecafbd0c16c4 Mon Sep 17 00:00:00 2001
From: Rich Felker <dalias@aerifal.cx>
Date: Fri, 22 May 2020 17:35:14 -0400
Subject: [PATCH 1/4] reorder thread list unlink in pthread_exit after all
 locks

since the backend for LOCK() skips locking if single-threaded, it's
unsafe to make the process appear single-threaded before the last use
of lock.

this fixes potential unsynchronized access to a linked list via
__dl_thread_cleanup.
---
 src/thread/pthread_create.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 5f491092..6a3b0c21 100644
--- src/thread/pthread_create.c
+++ src/thread/pthread_create.c
@@ -90,14 +90,7 @@ _Noreturn void __pthread_exit(void *result)
 		exit(0);
 	}
 
-	/* At this point we are committed to thread termination. Unlink
-	 * the thread from the list. This change will not be visible
-	 * until the lock is released, which only happens after SYS_exit
-	 * has been called, via the exit futex address pointing at the lock. */
-	libc.threads_minus_1--;
-	self->next->prev = self->prev;
-	self->prev->next = self->next;
-	self->prev = self->next = self;
+	/* At this point we are committed to thread termination. */
 
 	/* Process robust list in userspace to handle non-pshared mutexes
 	 * and the detached thread case where the robust list head will
@@ -121,6 +114,16 @@ _Noreturn void __pthread_exit(void *result)
 	__do_orphaned_stdio_locks();
 	__dl_thread_cleanup();
 
+	/* Last, unlink thread from the list. This change will not be visible
+	 * until the lock is released, which only happens after SYS_exit
+	 * has been called, via the exit futex address pointing at the lock.
+	 * This needs to happen after any possible calls to LOCK() that might
+	 * skip locking if libc.threads_minus_1 is zero. */
+	libc.threads_minus_1--;
+	self->next->prev = self->prev;
+	self->prev->next = self->next;
+	self->prev = self->next = self;
+
 	/* This atomic potentially competes with a concurrent pthread_detach
 	 * call; the loser is responsible for freeing thread resources. */
 	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
--
2.21.0
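
What this reordering prevents, concretely: musl's LOCK() backend returns immediately once the process appears single-threaded, so decrementing the thread count before the exit path's last LOCK() user (such as __dl_thread_cleanup()) turns that lock into a no-op while the data it guards may still be shared. A minimal standalone sketch of the hazard follows; it is not musl source, the toy lock uses C11 atomics rather than musl's primitives, and lock_maybe/exit_path_* are invented names:

#include <stdatomic.h>

static atomic_int threads_minus_1 = 1;	/* one other thread still alive */
static atomic_int lk;			/* guards some shared list */

static void lock_maybe(void)
{
	/* skip-if-single-threaded fast path, as in musl's LOCK() */
	if (!atomic_load(&threads_minus_1)) return;
	while (atomic_exchange(&lk, 1));
}

static void unlock(void)
{
	atomic_store(&lk, 0);
}

/* Pre-patch order: the count is dropped first, so lock_maybe() degrades
 * to a no-op while other threads may still contend for the list. */
static void exit_path_buggy(void)
{
	atomic_fetch_sub(&threads_minus_1, 1);
	lock_maybe();	/* silently skipped: unsynchronized list access */
	unlock();
}

/* Post-patch order: every lock user runs first; only then may the
 * process be made to appear single-threaded. */
static void exit_path_fixed(void)
{
	lock_maybe();	/* still a real lock here */
	unlock();
	atomic_fetch_sub(&threads_minus_1, 1);
}

int main(void)
{
	exit_path_fixed();	/* the safe order */
	(void)exit_path_buggy;	/* the racy order, kept for contrast */
	return 0;
}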
@@ -0,0 +1,78 @@
From e01b5939b38aea5ecbe41670643199825874b26c Mon Sep 17 00:00:00 2001
From: Rich Felker <dalias@aerifal.cx>
Date: Thu, 21 May 2020 23:32:45 -0400
Subject: [PATCH 2/4] don't use libc.threads_minus_1 as relaxed atomic for
 skipping locks

after all but the last thread exits, the next thread to observe
libc.threads_minus_1==0 and conclude that it can skip locking fails to
synchronize with any changes to memory that were made by the
last-exiting thread. this can produce data races.

on some archs, at least x86, memory synchronization is unlikely to be
a problem; however, with the inline locks in malloc, skipping the lock
also eliminated the compiler barrier, and caused code that needed to
re-check chunk in-use bits after obtaining the lock to reuse a stale
value, possibly from before the process became single-threaded. this
in turn produced corruption of the heap state.

some uses of libc.threads_minus_1 remain, especially for allocation of
new TLS in the dynamic linker; otherwise, it could be removed
entirely. it's made non-volatile to reflect that the remaining
accesses are only made under lock on the thread list.

instead of libc.threads_minus_1, libc.threaded is now used for
skipping locks. the difference is that libc.threaded is permanently
true once an additional thread has been created. this will produce
some performance regression in processes that are mostly
single-threaded but occasionally creating threads. in the future it
may be possible to bring back the full lock-skipping, but more care
needs to be taken to produce a safe design.
---
 src/internal/libc.h | 2 +-
 src/malloc/malloc.c | 2 +-
 src/thread/__lock.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/internal/libc.h b/src/internal/libc.h
index ac97dc7e..c0614852 100644
--- src/internal/libc.h
+++ src/internal/libc.h
@@ -21,7 +21,7 @@ struct __libc {
 	int can_do_threads;
 	int threaded;
 	int secure;
-	volatile int threads_minus_1;
+	int threads_minus_1;
 	size_t *auxv;
 	struct tls_module *tls_head;
 	size_t tls_size, tls_align, tls_cnt;
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 96982596..2553a62e 100644
--- src/malloc/malloc.c
+++ src/malloc/malloc.c
@@ -26,7 +26,7 @@ int __malloc_replaced;
 
 static inline void lock(volatile int *lk)
 {
-	if (libc.threads_minus_1)
+	if (libc.threaded)
 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
 }
 
diff --git a/src/thread/__lock.c b/src/thread/__lock.c
index 45557c88..5b9b144e 100644
--- src/thread/__lock.c
+++ src/thread/__lock.c
@@ -18,7 +18,7 @@
 
 void __lock(volatile int *l)
 {
-	if (!libc.threads_minus_1) return;
+	if (!libc.threaded) return;
 	/* fast path: INT_MIN for the lock, +1 for the congestion */
 	int current = a_cas(l, 0, INT_MIN + 1);
 	if (!current) return;
--
2.21.0
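
The heap-corruption mechanism this message describes is worth one concrete illustration: when the inline lock's skip path is taken there is no atomic operation and therefore no compiler barrier, so a re-check performed "after obtaining the lock" can legally be folded into an earlier read. A hedged standalone sketch, not musl's malloc; threaded, chunk_in_use, and try_claim are invented stand-ins, and the GCC/Clang __atomic builtins supply the toy lock:

static _Bool threaded;		/* stands in for the pre-patch counter test */
static volatile int lk;
static int chunk_in_use;	/* stands in for a chunk's in-use bit */

static inline void lock(volatile int *l)
{
	if (!threaded) return;	/* skipped: no atomic op, no barrier */
	while (__atomic_exchange_n(l, 1, __ATOMIC_ACQUIRE));
}

static inline void unlock(volatile int *l)
{
	if (threaded) __atomic_store_n(l, 0, __ATOMIC_RELEASE);
}

int try_claim(void)
{
	int seen = chunk_in_use;	/* optimistic read before locking */
	lock(&lk);
	/* With the lock compiled out, nothing forces a reload here; the
	 * compiler may reuse `seen`, a value possibly from before the
	 * process became single-threaded, making this re-check a no-op. */
	int now = chunk_in_use;
	if (!now) chunk_in_use = 1;	/* claim the chunk */
	unlock(&lk);
	return !now;
	(void)seen;
}

int main(void) { return try_claim() ? 0 : 1; }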
@@ -0,0 +1,30 @@
From f12888e9eb9eed60cc266b899dcafecb4752964a Mon Sep 17 00:00:00 2001
From: Rich Felker <dalias@aerifal.cx>
Date: Fri, 22 May 2020 17:25:38 -0400
Subject: [PATCH 3/4] cut down size of some libc struct members

these are all flags that can be single-byte values.
---
 src/internal/libc.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/internal/libc.h b/src/internal/libc.h
index c0614852..d47f58e0 100644
--- src/internal/libc.h
+++ src/internal/libc.h
@@ -18,9 +18,9 @@ struct tls_module {
 };
 
 struct __libc {
-	int can_do_threads;
-	int threaded;
-	int secure;
+	char can_do_threads;
+	char threaded;
+	char secure;
 	int threads_minus_1;
 	size_t *auxv;
 	struct tls_module *tls_head;
--
2.21.0
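
The space saving is easy to verify in isolation. A standalone check, reduced to the four fields involved (the struct tags here are invented, and exact sizes depend on the ABI):

#include <stdio.h>

struct before { int can_do_threads, threaded, secure, threads_minus_1; };
struct after  { char can_do_threads, threaded, secure; int threads_minus_1; };

int main(void)
{
	/* on a typical LP64 target this prints "16 8": the three flag
	 * bytes plus one byte of padding now occupy the space of a
	 * single int */
	printf("%zu %zu\n", sizeof(struct before), sizeof(struct after));
	return 0;
}

Patch 4/4 below then slots its new need_locks byte into the padding this change opens up after secure.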
@@ -0,0 +1,102 @@
From 8d81ba8c0bc6fe31136cb15c9c82ef4c24965040 Mon Sep 17 00:00:00 2001
From: Rich Felker <dalias@aerifal.cx>
Date: Fri, 22 May 2020 17:45:47 -0400
Subject: [PATCH 4/4] restore lock-skipping for processes that return to
 single-threaded state

the design used here relies on the barrier provided by the first lock
operation after the process returns to single-threaded state to
synchronize with actions by the last thread that exited. by storing
the intent to change modes in the same object used to detect whether
locking is needed, it's possible to avoid an extra (possibly costly)
memory load after the lock is taken.
---
 src/internal/libc.h         | 1 +
 src/malloc/malloc.c         | 5 ++++-
 src/thread/__lock.c         | 4 +++-
 src/thread/pthread_create.c | 8 ++++----
 4 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/src/internal/libc.h b/src/internal/libc.h
index d47f58e0..619bba86 100644
--- src/internal/libc.h
+++ src/internal/libc.h
@@ -21,6 +21,7 @@ struct __libc {
 	char can_do_threads;
 	char threaded;
 	char secure;
+	volatile signed char need_locks;
 	int threads_minus_1;
 	size_t *auxv;
 	struct tls_module *tls_head;
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 2553a62e..a803d4c9 100644
--- src/malloc/malloc.c
+++ src/malloc/malloc.c
@@ -26,8 +26,11 @@ int __malloc_replaced;
 
 static inline void lock(volatile int *lk)
 {
-	if (libc.threaded)
+	int need_locks = libc.need_locks;
+	if (need_locks) {
 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+		if (need_locks < 0) libc.need_locks = 0;
+	}
 }
 
 static inline void unlock(volatile int *lk)
diff --git a/src/thread/__lock.c b/src/thread/__lock.c
index 5b9b144e..60eece49 100644
--- src/thread/__lock.c
+++ src/thread/__lock.c
@@ -18,9 +18,11 @@
 
 void __lock(volatile int *l)
 {
-	if (!libc.threaded) return;
+	int need_locks = libc.need_locks;
+	if (!need_locks) return;
 	/* fast path: INT_MIN for the lock, +1 for the congestion */
 	int current = a_cas(l, 0, INT_MIN + 1);
+	if (need_locks < 0) libc.need_locks = 0;
 	if (!current) return;
 	/* A first spin loop, for medium congestion. */
 	for (unsigned i = 0; i < 10; ++i) {
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 6a3b0c21..6bdfb44f 100644
--- src/thread/pthread_create.c
+++ src/thread/pthread_create.c
@@ -118,8 +118,8 @@ _Noreturn void __pthread_exit(void *result)
 	 * until the lock is released, which only happens after SYS_exit
 	 * has been called, via the exit futex address pointing at the lock.
 	 * This needs to happen after any possible calls to LOCK() that might
-	 * skip locking if libc.threads_minus_1 is zero. */
-	libc.threads_minus_1--;
+	 * skip locking if process appears single-threaded. */
+	if (!--libc.threads_minus_1) libc.need_locks = -1;
 	self->next->prev = self->prev;
 	self->prev->next = self->next;
 	self->prev = self->next = self;
@@ -339,7 +339,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
 
 	__tl_lock();
-	libc.threads_minus_1++;
+	if (!libc.threads_minus_1++) libc.need_locks = 1;
 	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
 
 	/* All clone failures translate to EAGAIN. If explicit scheduling
@@ -363,7 +363,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		new->next->prev = new;
 		new->prev->next = new;
 	} else {
-		if (!--libc.threads_minus_1) libc.need_locks = 0;
 	}
 	__tl_unlock();
 	__restore_sigs(&set);
--
2.21.0
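
The need_locks protocol this patch introduces has three states: 1 (other threads exist, always lock), -1 (the process just returned to single-threaded state; the next lock still runs, providing the barrier that synchronizes with the exited thread, then downgrades), and 0 (single-threaded, skip locking). Below is a standalone simulation of the transitions; the create/exit updates mirror the patch's lines, but the futex lock is reduced to a print and everything runs in one thread, so it is illustrative only:

#include <stdio.h>

static signed char need_locks;	/* volatile signed char in libc.h */
static int threads_minus_1;

static void on_thread_create(void)
{
	if (!threads_minus_1++) need_locks = 1;		/* as in __pthread_create */
}

static void on_thread_exit(void)
{
	if (!--threads_minus_1) need_locks = -1;	/* as in __pthread_exit */
}

static void lock(void)
{
	int nl = need_locks;		/* one load, as in the patch */
	if (!nl) { puts("skip"); return; }
	puts("lock");			/* musl does a_swap/__wait here */
	if (nl < 0) need_locks = 0;	/* barrier taken; skip from now on */
}

int main(void)
{
	lock();			/* skip: never threaded */
	on_thread_create();
	lock();			/* lock: multi-threaded */
	on_thread_exit();
	lock();			/* lock: final synchronizing pass */
	lock();			/* skip: single-threaded again */
	return 0;
}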
@@ -2,7 +2,7 @@
 pkgname=musl
 reverts="1.2.0_1"
 version=1.1.24
-revision=2
+revision=3
 archs="*-musl"
 bootstrap=yes
 build_style=gnu-configure