This file tracks updates from the "release/2.29/master" branch:
https://sourceware.org/git/?p=glibc.git;a=shortlog;h=refs/heads/release/2.29/master

Abridged commits are appended to this file.

From ec894251ef11723d10df04fcfd7bd2030c6e43ff Mon Sep 17 00:00:00 2001
From: Carlos O'Donell <carlos@redhat.com>
Date: Mon, 21 Jan 2019 22:50:12 -0500
Subject: [PATCH] nptl: Fix pthread_rwlock_try*lock stalls (Bug 23844)
diff --git a/nptl/pthread_rwlock_tryrdlock.c b/nptl/pthread_rwlock_tryrdlock.c
index 368862ff07..2f94f17f36 100644
--- a/nptl/pthread_rwlock_tryrdlock.c
+++ b/nptl/pthread_rwlock_tryrdlock.c
@@ -94,15 +94,22 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
 /* Same as in __pthread_rwlock_rdlock_full:
 We started the read phase, so we are also responsible for
 updating the write-phase futex. Relaxed MO is sufficient.
- Note that there can be no other reader that we have to wake
- because all other readers will see the read phase started by us
- (or they will try to start it themselves); if a writer started
- the read phase, we cannot have started it. Furthermore, we
- cannot discard a PTHREAD_RWLOCK_FUTEX_USED flag because we will
- overwrite the value set by the most recent writer (or the readers
- before it in case of explicit hand-over) and we know that there
- are no waiting readers. */
- atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 0);
+ We have to do the same steps as a writer would when handing over the
+ read phase to use because other readers cannot distinguish between
+ us and the writer.
+ Note that __pthread_rwlock_tryrdlock callers will not have to be
+ woken up because they will either see the read phase started by us
+ or they will try to start it themselves; however, callers of
+ __pthread_rwlock_rdlock_full just increase the reader count and then
+ check what state the lock is in, so they cannot distinguish between
+ us and a writer that acquired and released the lock in the
+ meantime. */
+ if ((atomic_exchange_relaxed (&rwlock->__data.__wrphase_futex, 0)
+ & PTHREAD_RWLOCK_FUTEX_USED) != 0)
+ {
+ int private = __pthread_rwlock_get_private (rwlock);
+ futex_wake (&rwlock->__data.__wrphase_futex, INT_MAX, private);
+ }
 }

 return 0;
diff --git a/nptl/pthread_rwlock_trywrlock.c b/nptl/pthread_rwlock_trywrlock.c
index fd37a71ce4..fae475cc70 100644
--- a/nptl/pthread_rwlock_trywrlock.c
+++ b/nptl/pthread_rwlock_trywrlock.c
@@ -46,8 +46,15 @@ __pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
 &rwlock->__data.__readers, &r,
 r | PTHREAD_RWLOCK_WRPHASE | PTHREAD_RWLOCK_WRLOCKED))
 {
+ /* We have become the primary writer and we cannot have shared
+ the PTHREAD_RWLOCK_FUTEX_USED flag with someone else, so we
+ can simply enable blocking (see full wrlock code). */
 atomic_store_relaxed (&rwlock->__data.__writers_futex, 1);
- atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 1);
+ /* If we started a write phase, we need to enable readers to
+ wait. If we did not, we must not change it because other threads
+ may have set the PTHREAD_RWLOCK_FUTEX_USED in the meantime. */
+ if ((r & PTHREAD_RWLOCK_WRPHASE) == 0)
+ atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 1);
 atomic_store_relaxed (&rwlock->__data.__cur_writer,
 THREAD_GETMEM (THREAD_SELF, tid));
 return 0;
diff --git a/support/Makefile b/support/Makefile
index 432cf2fe6c..c15b93647c 100644
--- a/support/Makefile
+++ b/support/Makefile
@@ -129,6 +129,7 @@ libsupport-routines = \
 xpthread_mutexattr_settype \
 xpthread_once \
 xpthread_rwlock_init \
+ xpthread_rwlock_destroy \
 xpthread_rwlock_rdlock \
 xpthread_rwlock_unlock \
 xpthread_rwlock_wrlock \
diff --git a/support/xpthread_rwlock_destroy.c b/support/xpthread_rwlock_destroy.c
new file mode 100644
index 0000000000..6d6e953569
--- /dev/null
+++ b/support/xpthread_rwlock_destroy.c
@@ -0,0 +1,26 @@
+/* pthread_rwlock_destroy with error checking.
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <support/xthread.h>
+
+void
+xpthread_rwlock_destroy (pthread_rwlock_t *rwlock)
+{
+ xpthread_check_return ("pthread_rwlock_destroy",
+ pthread_rwlock_destroy (rwlock));
+}
diff --git a/support/xthread.h b/support/xthread.h
index 47c23235f3..9fe1f68b3b 100644
--- a/support/xthread.h
+++ b/support/xthread.h
@@ -84,6 +84,7 @@ void xpthread_rwlockattr_setkind_np (pthread_rwlockattr_t *attr, int pref);
 void xpthread_rwlock_wrlock (pthread_rwlock_t *rwlock);
 void xpthread_rwlock_rdlock (pthread_rwlock_t *rwlock);
 void xpthread_rwlock_unlock (pthread_rwlock_t *rwlock);
+void xpthread_rwlock_destroy (pthread_rwlock_t *rwlock);

 __END_DECLS

From 44113a8ba24af23d7bbb174f9087a6b83a76289a Mon Sep 17 00:00:00 2001
From: Stefan Liebler <stli@linux.ibm.com>
Date: Thu, 7 Feb 2019 15:18:36 +0100
Subject: [PATCH] Add compiler barriers around modifications of the robust
 mutex list for pthread_mutex_trylock. [BZ #24180]
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 8fe43b8f0f..bf2869eca2 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -94,6 +94,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 &mutex->__data.__list.__next);
+ /* We need to set op_pending before starting the operation. Also
+ see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");

 oldval = mutex->__data.__lock;
 do
@@ -119,7 +122,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 /* But it is inconsistent unless marked otherwise. */
 mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

+ /* We must not enqueue the mutex before we have acquired it.
+ Also see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
 ENQUEUE_MUTEX (mutex);
+ /* We need to clear op_pending after we enqueue the mutex. */
+ __asm ("" ::: "memory");
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

 /* Note that we deliberately exist here. If we fall
@@ -135,6 +143,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 int kind = PTHREAD_MUTEX_TYPE (mutex);
 if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
 {
+ /* We do not need to ensure ordering wrt another memory
+ access. Also see comments at ENQUEUE_MUTEX. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 NULL);
 return EDEADLK;
@@ -142,6 +152,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)

 if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
 {
+ /* We do not need to ensure ordering wrt another memory
+ access. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 NULL);

@@ -160,6 +172,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 id, 0);
 if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
 {
+ /* We haven't acquired the lock as it is already acquired by
+ another owner. We do not need to ensure ordering wrt another
+ memory access. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

 return EBUSY;
@@ -173,13 +188,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 if (oldval == id)
 lll_unlock (mutex->__data.__lock,
 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+ /* FIXME This violates the mutex destruction requirements. See
+ __pthread_mutex_unlock_full. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 return ENOTRECOVERABLE;
 }
 }
 while ((oldval & FUTEX_OWNER_DIED) != 0);

+ /* We must not enqueue the mutex before we have acquired it.
+ Also see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
 ENQUEUE_MUTEX (mutex);
+ /* We need to clear op_pending after we enqueue the mutex. */
+ __asm ("" ::: "memory");
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

 mutex->__data.__owner = id;
@@ -211,10 +233,15 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 }

 if (robust)
- /* Note: robust PI futexes are signaled by setting bit 0. */
- THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
- (void *) (((uintptr_t) &mutex->__data.__list.__next)
- | 1));
+ {
+ /* Note: robust PI futexes are signaled by setting bit 0. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ (void *) (((uintptr_t) &mutex->__data.__list.__next)
+ | 1));
+ /* We need to set op_pending before starting the operation. Also
+ see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
+ }

 oldval = mutex->__data.__lock;

@@ -223,12 +250,16 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 {
 if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
 {
+ /* We do not need to ensure ordering wrt another memory
+ access. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 return EDEADLK;
 }

 if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
 {
+ /* We do not need to ensure ordering wrt another memory
+ access. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

 /* Just bump the counter. */
@@ -250,6 +281,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 {
 if ((oldval & FUTEX_OWNER_DIED) == 0)
 {
+ /* We haven't acquired the lock as it is already acquired by
+ another owner. We do not need to ensure ordering wrt another
+ memory access. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

 return EBUSY;
@@ -270,6 +304,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 if (INTERNAL_SYSCALL_ERROR_P (e, __err)
 && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
 {
+ /* The kernel has not yet finished the mutex owner death.
+ We do not need to ensure ordering wrt another memory
+ access. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

 return EBUSY;
@@ -287,7 +324,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 /* But it is inconsistent unless marked otherwise. */
 mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

+ /* We must not enqueue the mutex before we have acquired it.
+ Also see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
 ENQUEUE_MUTEX (mutex);
+ /* We need to clear op_pending after we enqueue the mutex. */
+ __asm ("" ::: "memory");
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

 /* Note that we deliberately exit here. If we fall
@@ -310,13 +352,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
 0, 0);

+ /* To the kernel, this will be visible after the kernel has
+ acquired the mutex in the syscall. */
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 return ENOTRECOVERABLE;
 }

 if (robust)
 {
+ /* We must not enqueue the mutex before we have acquired it.
+ Also see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
 ENQUEUE_MUTEX_PI (mutex);
+ /* We need to clear op_pending after we enqueue the mutex. */
+ __asm ("" ::: "memory");
 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 }

From c096b008d2671028c21ac8cf01f18a2083e73c44 Mon Sep 17 00:00:00 2001
From: Florian Weimer <fweimer@redhat.com>
Date: Fri, 8 Feb 2019 12:54:41 +0100
Subject: [PATCH] nptl: Avoid fork handler lock for async-signal-safe fork [BZ
 #24161]
--- a/nptl/register-atfork.c
+++ b/nptl/register-atfork.c
@@ -107,13 +107,14 @@ __unregister_atfork (void *dso_handle)
 }

 void
-__run_fork_handlers (enum __run_fork_handler_type who)
+__run_fork_handlers (enum __run_fork_handler_type who, _Bool do_locking)
 {
 struct fork_handler *runp;

 if (who == atfork_run_prepare)
 {
- lll_lock (atfork_lock, LLL_PRIVATE);
+ if (do_locking)
+ lll_lock (atfork_lock, LLL_PRIVATE);
 size_t sl = fork_handler_list_size (&fork_handlers);
 for (size_t i = sl; i > 0; i--)
 {
@@ -133,7 +134,8 @@ __run_fork_handlers (enum __run_fork_handler_type who)
 else if (who == atfork_run_parent && runp->parent_handler)
 runp->parent_handler ();
 }
- lll_unlock (atfork_lock, LLL_PRIVATE);
+ if (do_locking)
+ lll_unlock (atfork_lock, LLL_PRIVATE);
 }
 }

diff --git a/sysdeps/nptl/fork.c b/sysdeps/nptl/fork.c
index bd68f18b45..14b69a6f89 100644
--- a/sysdeps/nptl/fork.c
+++ b/sysdeps/nptl/fork.c
@@ -55,7 +55,7 @@ __libc_fork (void)
 but our current fork implementation is not. */
 bool multiple_threads = THREAD_GETMEM (THREAD_SELF, header.multiple_threads);

- __run_fork_handlers (atfork_run_prepare);
+ __run_fork_handlers (atfork_run_prepare, multiple_threads);

 /* If we are not running multiple threads, we do not have to
 preserve lock state. If fork runs from a signal handler, only
@@ -134,7 +134,7 @@ __libc_fork (void)
 __rtld_lock_initialize (GL(dl_load_lock));

 /* Run the handlers registered for the child. */
- __run_fork_handlers (atfork_run_child);
+ __run_fork_handlers (atfork_run_child, multiple_threads);
 }
 else
 {
@@ -149,7 +149,7 @@ __libc_fork (void)
 }

 /* Run the handlers registered for the parent. */
- __run_fork_handlers (atfork_run_parent);
+ __run_fork_handlers (atfork_run_parent, multiple_threads);
 }

 return pid;
diff --git a/sysdeps/nptl/fork.h b/sysdeps/nptl/fork.h
index a1c3b26b68..99ed76034b 100644
--- a/sysdeps/nptl/fork.h
+++ b/sysdeps/nptl/fork.h
@@ -52,10 +52,12 @@ enum __run_fork_handler_type
 - atfork_run_child: run all the CHILD_HANDLER and unlocks the internal
 lock.
 - atfork_run_parent: run all the PARENT_HANDLER and unlocks the internal
- lock. */
-extern void __run_fork_handlers (enum __run_fork_handler_type who)
- attribute_hidden;
+ lock.
+
+ Perform locking only if DO_LOCKING. */
+extern void __run_fork_handlers (enum __run_fork_handler_type who,
+ _Bool do_locking) attribute_hidden;

 /* C library side function to register new fork handlers. */
 extern int __register_atfork (void (*__prepare) (void),

From 067fc32968b601493f4b247a3ac00caeea3f3d61 Mon Sep 17 00:00:00 2001
From: Florian Weimer <fweimer@redhat.com>
Date: Fri, 15 Feb 2019 21:27:01 +0100
Subject: [PATCH] nptl: Fix invalid Systemtap probe in pthread_join [BZ #24211]
diff --git a/nptl/pthread_join_common.c b/nptl/pthread_join_common.c
index ecb78ffba5..366feb376b 100644
--- a/nptl/pthread_join_common.c
+++ b/nptl/pthread_join_common.c
@@ -86,6 +86,7 @@ __pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
 pthread_cleanup_pop (0);
 }

+ void *pd_result = pd->result;
 if (__glibc_likely (result == 0))
 {
 /* We mark the thread as terminated and as joined. */
@@ -93,7 +94,7 @@ __pthread_timedjoin_ex (pthread_t threadid, void **thread_return,

 /* Store the return value if the caller is interested. */
 if (thread_return != NULL)
- *thread_return = pd->result;
+ *thread_return = pd_result;

 /* Free the TCB. */
 __free_tcb (pd);
@@ -101,7 +102,7 @@ __pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
 else
 pd->joinid = NULL;

- LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd->result);
+ LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd_result);

 return result;
 }

From bc6f839fb4066be83272c735e662850af2595777 Mon Sep 17 00:00:00 2001
From: Stefan Liebler <stli@linux.ibm.com>
Date: Wed, 13 Mar 2019 10:45:35 +0100
Subject: [PATCH] Fix output of LD_SHOW_AUXV=1.
diff --git a/elf/dl-sysdep.c b/elf/dl-sysdep.c
index 5f6c679a3f..5d19b100b2 100644
--- a/elf/dl-sysdep.c
+++ b/elf/dl-sysdep.c
@@ -328,14 +328,9 @@ _dl_show_auxv (void)
 assert (AT_NULL == 0);
 assert (AT_IGNORE == 1);

- if (av->a_type == AT_HWCAP || av->a_type == AT_HWCAP2
- || AT_L1I_CACHEGEOMETRY || AT_L1D_CACHEGEOMETRY
- || AT_L2_CACHEGEOMETRY || AT_L3_CACHEGEOMETRY)
- {
- /* These are handled in a special way per platform. */
- if (_dl_procinfo (av->a_type, av->a_un.a_val) == 0)
- continue;
- }
+ /* Some entries are handled in a special way per platform. */
+ if (_dl_procinfo (av->a_type, av->a_un.a_val) == 0)
+ continue;

 if (idx < sizeof (auxvars) / sizeof (auxvars[0])
 && auxvars[idx].form != unknown)
diff --git a/sysdeps/powerpc/dl-procinfo.h b/sysdeps/powerpc/dl-procinfo.h
index f542f7318f..dfc3b33a72 100644
--- a/sysdeps/powerpc/dl-procinfo.h
+++ b/sysdeps/powerpc/dl-procinfo.h
@@ -225,7 +225,7 @@ _dl_procinfo (unsigned int type, unsigned long int word)
 break;
 }
 default:
- /* This should not happen. */
+ /* Fallback to generic output mechanism. */
 return -1;
 }
 _dl_printf ("\n");
diff --git a/sysdeps/sparc/dl-procinfo.h b/sysdeps/sparc/dl-procinfo.h
index 282b8c5117..64ee267fc7 100644
--- a/sysdeps/sparc/dl-procinfo.h
+++ b/sysdeps/sparc/dl-procinfo.h
@@ -31,8 +31,8 @@ _dl_procinfo (unsigned int type, unsigned long int word)
 {
 int i;

- /* Fallback to unknown output mechanism. */
- if (type == AT_HWCAP2)
+ /* Fallback to generic output mechanism. */
+ if (type != AT_HWCAP)
 return -1;

 _dl_printf ("AT_HWCAP: ");
diff --git a/sysdeps/unix/sysv/linux/arm/dl-procinfo.h b/sysdeps/unix/sysv/linux/arm/dl-procinfo.h
index 66c00297b7..05c62c8687 100644
--- a/sysdeps/unix/sysv/linux/arm/dl-procinfo.h
+++ b/sysdeps/unix/sysv/linux/arm/dl-procinfo.h
@@ -67,7 +67,7 @@ _dl_procinfo (unsigned int type, unsigned long int word)
 break;
 }
 default:
- /* This should not happen. */
+ /* Fallback to generic output mechanism. */
 return -1;
 }
 _dl_printf ("\n");
diff --git a/sysdeps/unix/sysv/linux/i386/dl-procinfo.h b/sysdeps/unix/sysv/linux/i386/dl-procinfo.h
index 22b43431bc..0585cdaa9c 100644
--- a/sysdeps/unix/sysv/linux/i386/dl-procinfo.h
+++ b/sysdeps/unix/sysv/linux/i386/dl-procinfo.h
@@ -30,8 +30,8 @@ _dl_procinfo (unsigned int type, unsigned long int word)
 in the kernel sources. */
 int i;

- /* Fallback to unknown output mechanism. */
- if (type == AT_HWCAP2)
+ /* Fallback to generic output mechanism. */
+ if (type != AT_HWCAP)
 return -1;

 _dl_printf ("AT_HWCAP: ");
diff --git a/sysdeps/unix/sysv/linux/s390/dl-procinfo.h b/sysdeps/unix/sysv/linux/s390/dl-procinfo.h
index 19329a335b..d67fde368f 100644
--- a/sysdeps/unix/sysv/linux/s390/dl-procinfo.h
+++ b/sysdeps/unix/sysv/linux/s390/dl-procinfo.h
@@ -32,8 +32,8 @@ _dl_procinfo (unsigned int type, unsigned long int word)
 in the kernel sources. */
 int i;

- /* Fallback to unknown output mechanism. */
- if (type == AT_HWCAP2)
+ /* Fallback to generic output mechanism. */
+ if (type != AT_HWCAP)
 return -1;

 _dl_printf ("AT_HWCAP: ");

From e28ad442e73b00ae2047d89c8cc7f9b2a0de5436 Mon Sep 17 00:00:00 2001
From: TAMUKI Shoichi <tamuki@linet.gr.jp>
Date: Sat, 2 Mar 2019 21:00:28 +0900
Subject: [PATCH] ja_JP: Change the offset for Taisho gan-nen from 2 to 1 [BZ
 #24162]
diff --git a/localedata/locales/ja_JP b/localedata/locales/ja_JP
index 1fd2fee44b..9bfbb2bb9b 100644
--- a/localedata/locales/ja_JP
+++ b/localedata/locales/ja_JP
@@ -14951,7 +14951,7 @@ era "+:2:1990//01//01:+*:<U5E73><U6210>:%EC%Ey<U5E74>";/
 "+:2:1927//01//01:1989//01//07:<U662D><U548C>:%EC%Ey<U5E74>";/
 "+:1:1926//12//25:1926//12//31:<U662D><U548C>:%EC<U5143><U5E74>";/
 "+:2:1913//01//01:1926//12//24:<U5927><U6B63>:%EC%Ey<U5E74>";/
- "+:2:1912//07//30:1912//12//31:<U5927><U6B63>:%EC<U5143><U5E74>";/
+ "+:1:1912//07//30:1912//12//31:<U5927><U6B63>:%EC<U5143><U5E74>";/
 "+:6:1873//01//01:1912//07//29:<U660E><U6CBB>:%EC%Ey<U5E74>";/
 "+:1:0001//01//01:1872//12//31:<U897F><U66A6>:%EC%Ey<U5E74>";/
 "+:1:-0001//12//31:-*:<U7D00><U5143><U524D>:%EC%Ey<U5E74>"

From 0941350c20a52447e53c5169354408e3db591f73 Mon Sep 17 00:00:00 2001
From: TAMUKI Shoichi <tamuki@linet.gr.jp>
Date: Tue, 2 Apr 2019 16:46:55 +0900
Subject: [PATCH] ja_JP locale: Add entry for the new Japanese era [BZ #22964]
diff --git a/localedata/locales/ja_JP b/localedata/locales/ja_JP
index 9bfbb2bb9b..c64aaaff55 100644
--- a/localedata/locales/ja_JP
+++ b/localedata/locales/ja_JP
@@ -14946,7 +14946,9 @@ am_pm "<U5348><U524D>";"<U5348><U5F8C>"

 t_fmt_ampm "%p%I<U6642>%M<U5206>%S<U79D2>"

-era "+:2:1990//01//01:+*:<U5E73><U6210>:%EC%Ey<U5E74>";/
+era "+:2:2020//01//01:+*:<U4EE4><U548C>:%EC%Ey<U5E74>";/
+ "+:1:2019//05//01:2019//12//31:<U4EE4><U548C>:%EC<U5143><U5E74>";/
+ "+:2:1990//01//01:2019//04//30:<U5E73><U6210>:%EC%Ey<U5E74>";/
 "+:1:1989//01//08:1989//12//31:<U5E73><U6210>:%EC<U5143><U5E74>";/
 "+:2:1927//01//01:1989//01//07:<U662D><U548C>:%EC%Ey<U5E74>";/
 "+:1:1926//12//25:1926//12//31:<U662D><U548C>:%EC<U5143><U5E74>";/

From 52b7cd6e9a701bb203023d56e84551943dc6a4c0 Mon Sep 17 00:00:00 2001
From: Adam Maris <amaris@redhat.com>
Date: Thu, 14 Mar 2019 16:51:16 -0400
Subject: [PATCH] malloc: Check for large bin list corruption when inserting
 unsorted chunk
diff --git a/malloc/malloc.c b/malloc/malloc.c
index feaf7ee0bf..ce771375b6 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3876,10 +3876,14 @@ _int_malloc (mstate av, size_t bytes)
 {
 victim->fd_nextsize = fwd;
 victim->bk_nextsize = fwd->bk_nextsize;
+ if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
+ malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
 fwd->bk_nextsize = victim;
 victim->bk_nextsize->fd_nextsize = victim;
 }
 bck = fwd->bk;
+ if (bck->fd != fwd)
+ malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
 }
 }
 else

From c6177be4b92d5d7df50a785652d1912db511423e Mon Sep 17 00:00:00 2001
From: Andreas Schwab <schwab@suse.de>
Date: Wed, 15 May 2019 17:09:05 +0200
Subject: [PATCH] Fix crash in _IO_wfile_sync (bug 20568)
diff --git a/libio/wfileops.c b/libio/wfileops.c
index 78f20486e5..bab2ba4892 100644
--- a/libio/wfileops.c
+++ b/libio/wfileops.c
@@ -508,11 +508,12 @@ _IO_wfile_sync (FILE *fp)
 generate the wide characters up to the current reading
 position. */
 int nread;
-
+ size_t wnread = (fp->_wide_data->_IO_read_ptr
+ - fp->_wide_data->_IO_read_base);
 fp->_wide_data->_IO_state = fp->_wide_data->_IO_last_state;
 nread = (*cv->__codecvt_do_length) (cv, &fp->_wide_data->_IO_state,
 fp->_IO_read_base,
- fp->_IO_read_end, delta);
+ fp->_IO_read_end, wnread);
 fp->_IO_read_ptr = fp->_IO_read_base + nread;
 delta = -(fp->_IO_read_end - fp->_IO_read_base - nread);
 }

From e3f828b8bd6e21922da8be8dee35edef09382d8d Mon Sep 17 00:00:00 2001
From: Mark Wielaard <mark@klomp.org>
Date: Wed, 15 May 2019 17:14:01 +0200
Subject: [PATCH] dlfcn: Guard __dlerror_main_freeres with __libc_once_get
 (once) [BZ#24476]
diff --git a/dlfcn/dlerror.c b/dlfcn/dlerror.c
index 27376582d0..ca42c126c1 100644
--- a/dlfcn/dlerror.c
+++ b/dlfcn/dlerror.c
@@ -72,9 +72,16 @@ __dlerror (void)
 __libc_once (once, init);

 /* Get error string. */
- result = (struct dl_action_result *) __libc_getspecific (key);
- if (result == NULL)
- result = &last_result;
+ if (static_buf != NULL)
+ result = static_buf;
+ else
+ {
+ /* init () has been run and we don't use the static buffer.
+ So we have a valid key. */
+ result = (struct dl_action_result *) __libc_getspecific (key);
+ if (result == NULL)
+ result = &last_result;
+ }

 /* Test whether we already returned the string. */
 if (result->returned != 0)
@@ -230,13 +237,19 @@ free_key_mem (void *mem)
 void
 __dlerror_main_freeres (void)
 {
- void *mem;
 /* Free the global memory if used. */
 check_free (&last_result);
- /* Free the TSD memory if used. */
- mem = __libc_getspecific (key);
- if (mem != NULL)
- free_key_mem (mem);
+
+ if (__libc_once_get (once) && static_buf == NULL)
+ {
+ /* init () has been run and we don't use the static buffer.
+ So we have a valid key. */
+ void *mem;
+ /* Free the TSD memory if used. */
+ mem = __libc_getspecific (key);
+ if (mem != NULL)
+ free_key_mem (mem);
+ }
 }

 struct dlfcn_hook *_dlfcn_hook __attribute__((nocommon));

From 95d66fecaabbc92ab53027e808f0fc1929c9f21a Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra <wdijkstr@arm.com>
Date: Fri, 10 May 2019 16:38:21 +0100
Subject: [PATCH] Fix tcache count maximum (BZ #24531)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index ce771375b6..0abd653be2 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2919,6 +2919,8 @@ typedef struct tcache_perthread_struct
 tcache_entry *entries[TCACHE_MAX_BINS];
 } tcache_perthread_struct;

+#define MAX_TCACHE_COUNT 127 /* Maximum value of counts[] entries. */
+
 static __thread bool tcache_shutting_down = false;
 static __thread tcache_perthread_struct *tcache = NULL;

@@ -5124,8 +5126,11 @@ static inline int
 __always_inline
 do_set_tcache_count (size_t value)
 {
- LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
- mp_.tcache_count = value;
+ if (value <= MAX_TCACHE_COUNT)
+ {
+ LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
+ mp_.tcache_count = value;
+ }
 return 1;
 }

From 34fb5f61d3c3f4b8fc616ea259fa19168b58ecd4 Mon Sep 17 00:00:00 2001
From: "Dmitry V. Levin" <ldv@altlinux.org>
Date: Wed, 13 Feb 2019 01:20:51 +0000
Subject: [PATCH] libio: do not attempt to free wide buffers of legacy streams
 [BZ #24228]
diff --git a/libio/genops.c b/libio/genops.c
index 2a0d9b81df..11a15549e8 100644
--- a/libio/genops.c
+++ b/libio/genops.c
@@ -789,9 +789,16 @@ _IO_unbuffer_all (void)

 for (fp = (FILE *) _IO_list_all; fp; fp = fp->_chain)
 {
+ int legacy = 0;
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_1)
+ if (__glibc_unlikely (_IO_vtable_offset (fp) != 0))
+ legacy = 1;
+#endif
+
 if (! (fp->_flags & _IO_UNBUFFERED)
 /* Iff stream is un-orientated, it wasn't used. */
- && fp->_mode != 0)
+ && (legacy || fp->_mode != 0))
 {
 #ifdef _IO_MTSAFE_IO
 int cnt;
@@ -805,7 +812,7 @@ _IO_unbuffer_all (void)
 __sched_yield ();
 #endif

- if (! dealloc_buffers && !(fp->_flags & _IO_USER_BUF))
+ if (! legacy && ! dealloc_buffers && !(fp->_flags & _IO_USER_BUF))
 {
 fp->_flags |= _IO_USER_BUF;

@@ -816,7 +823,7 @@ _IO_unbuffer_all (void)

 _IO_SETBUF (fp, NULL, 0);

- if (fp->_mode > 0)
+ if (! legacy && fp->_mode > 0)
 _IO_wsetb (fp, NULL, NULL, 0);

 #ifdef _IO_MTSAFE_IO
@@ -827,7 +834,8 @@ _IO_unbuffer_all (void)

 /* Make sure that never again the wide char functions can be
 used. */
- fp->_mode = -1;
+ if (! legacy)
+ fp->_mode = -1;
 }

 #ifdef _IO_MTSAFE_IO
