diff --git a/ChangeLog b/ChangeLog
index dc1ed1b..45579de 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+2015-02-10  Evangelos Foutras
+
+	[BZ #17949]
+	* sysdeps/i386/i686/multiarch/mempcpy_chk.S: Fix position of
+	jump label.
+
 2015-02-06  Carlos O'Donell
 
 	* version.h (RELEASE): Set to "stable".
@@ -7,6 +13,7 @@
 	* sysdeps/unix/sysv/linux/hppa/pthread.h: Sync with pthread.h.
 
 2015-02-05  Paul Pluzhnikov
+	    Paul Eggert
 
 	[BZ #16618]
 	* stdio-common/tst-sscanf.c (main): Test for buffer overflow.
diff --git a/NEWS b/NEWS
index 617cdbb..ff79f0d 100644
--- a/NEWS
+++ b/NEWS
@@ -5,6 +5,12 @@ See the end for copying conditions.
 Please send GNU C library bug reports via <http://sourceware.org/bugzilla/>
 using `glibc' in the "product" field.
 
+Version 2.21.1
+
+* The following bugs are resolved with this release:
+
+  17949.
+
 Version 2.21
 
 * The following bugs are resolved with this release:
@@ -21,10 +27,11 @@ Version 2.21
   17801, 17803, 17806, 17834, 17844, 17848, 17868, 17869, 17870, 17885,
   17892.
 
-* CVE-2015-1472 Under certain conditions wscanf can allocate too little
-  memory for the to-be-scanned arguments and overflow the allocated
-  buffer.  The implementation now correctly computes the required buffer
-  size when using malloc.
+* CVE-2015-1472 CVE-2015-1473 Under certain conditions wscanf can allocate
+  too little memory for the to-be-scanned arguments and overflow the
+  allocated buffer.  The implementation now correctly computes the required
+  buffer size when using malloc, and switches to malloc from alloca as
+  intended.
 
 * A new semaphore algorithm has been implemented in generic C code for all
   machines.  Previous custom assembly implementations of semaphore were
diff --git a/sysdeps/i386/i686/multiarch/mempcpy_chk.S b/sysdeps/i386/i686/multiarch/mempcpy_chk.S
index 207b648..b6fa202 100644
--- a/sysdeps/i386/i686/multiarch/mempcpy_chk.S
+++ b/sysdeps/i386/i686/multiarch/mempcpy_chk.S
@@ -36,8 +36,8 @@ ENTRY(__mempcpy_chk)
 	cmpl	$0, KIND_OFFSET+__cpu_features@GOTOFF(%ebx)
 	jne	1f
 	call	__init_cpu_features
-	leal	__mempcpy_chk_ia32@GOTOFF(%ebx), %eax
-1:	testl	$bit_SSE2, CPUID_OFFSET+index_SSE2+__cpu_features@GOTOFF(%ebx)
+1:	leal	__mempcpy_chk_ia32@GOTOFF(%ebx), %eax
+	testl	$bit_SSE2, CPUID_OFFSET+index_SSE2+__cpu_features@GOTOFF(%ebx)
 	jz	2f
 	leal	__mempcpy_chk_sse2_unaligned@GOTOFF(%ebx), %eax
 	testl	$bit_Fast_Unaligned_Load, FEATURE_OFFSET+index_Fast_Unaligned_Load+__cpu_features@GOTOFF(%ebx)
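
Reviewer's note, not part of the patch: the mempcpy_chk.S hunk fixes a path on which the dispatcher could return an uninitialized pointer.  When __cpu_features was already set up, the `jne 1f` branch used to jump past the `leal` that loads the __mempcpy_chk_ia32 fallback into %eax, so on a CPU without SSE2 the code fell through to the return with whatever happened to be in %eax.  Moving label 1 in front of that `leal` makes the fallback load run on every path.  The C sketch below restates the selection logic to make this visible; it is illustrative only.  The names pick_mempcpy_chk, variant_*, and features_initialized are hypothetical stand-ins (not glibc interfaces), it assumes GCC or Clang on x86 for the __builtin_cpu_* calls, and the tail of the selection (Fast_Unaligned_Load and the other SSE2/SSSE3 variants) is simplified because it lies outside the hunk shown.

/* Hedged sketch of the __mempcpy_chk dispatch logic after the fix.
   All identifiers here are stand-ins for illustration; compile with
   e.g. "gcc -O2 sketch.c" on an x86 machine.  */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef void *(*mempcpy_chk_fn) (void *, const void *, size_t, size_t);

/* Dummy stand-ins for __mempcpy_chk_ia32 / _sse2_unaligned; the real
   implementations are assembly routines selected by CPU capability.  */
static void *
variant_ia32 (void *d, const void *s, size_t n, size_t dlen)
{
  return (char *) memcpy (d, s, n) + n;
}
static void *
variant_sse2_unaligned (void *d, const void *s, size_t n, size_t dlen)
{
  return (char *) memcpy (d, s, n) + n;
}

static int features_initialized;	/* plays the role of __cpu_features.kind */

static mempcpy_chk_fn
pick_mempcpy_chk (void)
{
  /* cmpl $0, KIND_OFFSET+__cpu_features ; jne 1f ; call __init_cpu_features */
  if (!features_initialized)
    {
      __builtin_cpu_init ();
      features_initialized = 1;
    }

  /* Label 1 in the fixed assembly: the ia32 fallback is chosen here, after
     the point the "already initialized" branch jumps to, so it is set on
     every path.  Before the patch that branch skipped this assignment and
     the returned value (register %eax) was undefined on non-SSE2 CPUs.  */
  mempcpy_chk_fn fn = variant_ia32;

  /* testl $bit_SSE2, ... ; jz 2f -- simplified: the real file goes on to
     test Fast_Unaligned_Load and pick among further variants not shown in
     the hunk above.  */
  if (__builtin_cpu_supports ("sse2"))
    fn = variant_sse2_unaligned;

  return fn;
}

int
main (void)
{
  char dst[16];
  mempcpy_chk_fn f = pick_mempcpy_chk ();
  char *end = f (dst, "glibc", 6, sizeof dst);
  printf ("copied \"%s\", %zu bytes\n", dst, (size_t) (end - dst));
  return 0;
}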