author    joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2010-11-07 12:11:40 +0000
committer joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2010-11-07 12:11:40 +0000
commit    486caa1f257e98014ac78f7f7715876f705c17dd (patch)
tree      99cad456a5df680ad632040afc583c4862c83b47 /libc
parent    e2dc087599c61bfc5ed2ab08b70d4fa69836b8b5 (diff)
Merge changes between r11842 and r11980 from /fsf/trunk.
git-svn-id: svn://svn.eglibc.org/trunk@11981 7b3dc134-2b1b-0410-93df-9e9f96275f8d
Diffstat (limited to 'libc')
-rw-r--r--  libc/ChangeLog  107
-rw-r--r--  libc/Makeconfig  8
-rw-r--r--  libc/NEWS  4
-rw-r--r--  libc/elf/dl-deps.c  2
-rw-r--r--  libc/elf/dl-load.c  48
-rw-r--r--  libc/elf/dl-object.c  56
-rw-r--r--  libc/elf/dl-open.c  31
-rw-r--r--  libc/elf/rtld-Rules  13
-rw-r--r--  libc/elf/rtld.c  27
-rw-r--r--  libc/include/dlfcn.h  1
-rw-r--r--  libc/malloc/malloc.c  4
-rw-r--r--  libc/math/math.h  2
-rw-r--r--  libc/po/da.po  11
-rw-r--r--  libc/string/Makefile  2
-rw-r--r--  libc/string/bug-strchr1.c  14
-rw-r--r--  libc/sysdeps/generic/ldsodefs.h  13
-rw-r--r--  libc/sysdeps/i386/i686/cacheinfo.c  4
-rw-r--r--  libc/sysdeps/powerpc/dl-procinfo.c  5
-rw-r--r--  libc/sysdeps/powerpc/dl-procinfo.h  7
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/a2/memcpy.S  511
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/power7/mempcpy.S  471
-rw-r--r--  libc/sysdeps/powerpc/powerpc64/a2/memcpy.S  501
-rw-r--r--  libc/sysdeps/powerpc/powerpc64/power7/mempcpy.S  458
-rw-r--r--  libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies  2
-rw-r--r--  libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies  2
-rw-r--r--  libc/sysdeps/unix/sysv/linux/ttyname.c  33
-rw-r--r--  libc/sysdeps/unix/sysv/linux/ttyname_r.c  32
-rw-r--r--  libc/sysdeps/x86_64/cacheinfo.c  20
-rw-r--r--  libc/sysdeps/x86_64/multiarch/strchr.S  4
29 files changed, 2259 insertions, 134 deletions
diff --git a/libc/ChangeLog b/libc/ChangeLog
index e162666af..ffa4f6836 100644
--- a/libc/ChangeLog
+++ b/libc/ChangeLog
@@ -1,3 +1,110 @@
+2010-11-04 Luis Machado <luisgpm@br.ibm.com>
+
+ * sysdeps/powerpc/powerpc32/power7/mempcpy.S: New file.
+ * sysdeps/powerpc/powerpc64/power7/mempcpy.S: New file.
+
+2010-11-03 H.J. Lu <hongjiu.lu@intel.com>
+
+ [BZ #12191]
+ * sysdeps/i386/i686/cacheinfo.c (__x86_64_raw_data_cache_size): New.
+ (__x86_64_raw_data_cache_size_half): Likewise.
+ (__x86_64_raw_shared_cache_size): Likewise.
+ (__x86_64_raw_shared_cache_size_half): Likewise.
+
+ * sysdeps/x86_64/cacheinfo.c (__x86_64_raw_data_cache_size): New.
+ (__x86_64_raw_data_cache_size_half): Likewise.
+ (__x86_64_raw_shared_cache_size): Likewise.
+ (__x86_64_raw_shared_cache_size_half): Likewise.
+ (init_cacheinfo): Set __x86_64_raw_data_cache_size,
+ __x86_64_raw_data_cache_size_half, __x86_64_raw_shared_cache_size
+ and __x86_64_raw_shared_cache_size_half. Round
+ __x86_64_data_cache_size_half, __x86_64_data_cache_size
+ __x86_64_shared_cache_size_half and __x86_64_shared_cache_size,
+ to multiple of 256 bytes.
+
+2010-11-03 Ulrich Drepper <drepper@gmail.com>
+
+ [BZ #12167]
+ * sysdeps/unix/sysv/linux/ttyname.c (ttyname): Recognize new mangling
+	of inaccessible symlinks.  Verify result of symlink before returning it.
+ * sysdeps/unix/sysv/linux/ttyname_r.c (__ttyname_r): Likewise.
+ Patch mostly by Miklos Szeredi <miklos@szeredi.hu>.
+
+2010-10-28 Erich Ritz <erichritz@gmail.com>
+
+ * math/math.h (isinf): Fix typo in comment.
+
+2010-11-01 Ulrich Drepper <drepper@gmail.com>
+
+ * po/da.po: Update from translation team.
+
+2010-10-26 Ulrich Drepper <drepper@gmail.com>
+
+ * elf/rtld.c (dl_main): Move assertion after the point where rtld map
+ is added to the list.
+
+2010-10-20 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+ Ulrich Drepper <drepper@gmail.com>
+
+ * elf/dl-object.c (_dl_new_object): Don't append the new object to
+ the global list here. Move code to...
+ (_dl_add_to_namespace_list): ...here. New function.
+ * elf/rtld.c (dl_main): Invoke _dl_add_to_namespace_list.
+ * sysdeps/generic/ldsodefs.h (_dl_add_to_namespace_list): Declare.
+ * elf/dl-load.c (lose): Don't remove the element from the list.
+ (_dl_map_object_from_fd): Invoke _dl_add_to_namespace_list.
+ (_dl_map_object): Likewise.
+
+2010-10-25 Ulrich Drepper <drepper@gmail.com>
+
+ [BZ #12159]
+ * sysdeps/x86_64/multiarch/strchr.S: Fix propagation of search byte
+ into all bytes of SSE register.
+ Patch by Richard Li <richardpku@gmail.com>.
+
+2010-10-24 Ulrich Drepper <drepper@gmail.com>
+
+ [BZ #12140]
+ * malloc/malloc.c (_int_free): Fill correct number of bytes when
+ perturbing.
+
+2010-10-20 Michael B. Brutman <brutman@us.ibm.com>
+
+ * sysdeps/powerpc/dl-procinfo.c: Add support for ppca2 platform
+ * sysdeps/powerpc/dl-procinfo.h: Add support for ppca2 platform
+ * sysdeps/powerpc/powerpc32/a2/memcpy.S: New file.
+ * sysdeps/powerpc/powerpc64/a2/memcpy.S: Likewise.
+ * sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies: New
+ submachine.
+ * sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies: Likewise.
+
+2010-10-22 Andreas Schwab <schwab@redhat.com>
+
+ * include/dlfcn.h (__RTLD_SECURE): Define.
+ * elf/dl-load.c (_dl_map_object): Remove preloaded parameter. Use
+ mode & __RTLD_SECURE instead.
+ (open_path): Rename preloaded parameter to secure.
+ * sysdeps/generic/ldsodefs.h (_dl_map_object): Adjust declaration.
+ * elf/dl-open.c (dl_open_worker): Adjust call to _dl_map_object.
+ * elf/dl-deps.c (openaux): Likewise.
+ * elf/rtld.c (struct map_args): Remove is_preloaded.
+ (map_doit): Don't use it.
+ (dl_main): Likewise.
+ (do_preload): Use __RTLD_SECURE instead of is_preloaded.
+ (dlmopen_doit): Add __RTLD_SECURE to mode bits.
+
+2010-09-09 Andreas Schwab <schwab@redhat.com>
+
+ * Makeconfig (sysd-rules-patterns): Add rtld-%:rtld-%.
+ (sysd-rules-targets): Remove duplicates.
+ * elf/rtld-Rules ($(objpfx)rtld-%.os): Add pattern rules with
+ rtld-%.$o dependency.
+
+2010-10-18 Andreas Schwab <schwab@redhat.com>
+
+ * elf/dl-open.c (dl_open_worker): Don't expand DST here, let
+ _dl_map_object do it.
+
2010-10-19 Ulrich Drepper <drepper@gmail.com>
* sysdeps/i386/bits/mathdef.h (FP_FAST_FMA): If the GCC 4.6 port has
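
The 2010-11-03 cacheinfo entry above describes rounding the exported cache-size values to a multiple of 256 bytes; the x86_64 hunk that does the rounding is not included in this excerpt (only its diffstat line appears), so the following is a minimal C sketch of that kind of rounding. The variable names are illustrative, not the __x86_64_* symbols from the patch, and the rounding direction (downwards) is an assumption.

#include <stdio.h>

/* Minimal sketch, not patch code: keep the raw size reported by the
   hardware and derive a value rounded down to a multiple of 256 bytes,
   as described in the 2010-11-03 ChangeLog entry.  */
int main (void)
{
  unsigned long raw_data_cache_size = 32 * 1024 + 100;  /* hypothetical raw value */
  unsigned long data_cache_size = raw_data_cache_size & ~255UL;  /* multiple of 256 */

  printf ("raw=%lu rounded=%lu\n", raw_data_cache_size, data_cache_size);
  return 0;
}
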
diff --git a/libc/Makeconfig b/libc/Makeconfig
index 9db1ee466..c9fe1991a 100644
--- a/libc/Makeconfig
+++ b/libc/Makeconfig
@@ -703,7 +703,7 @@ CPPFLAGS = $($(subdir)-CPPFLAGS) $(+includes) $(defines) \
-include $(..)include/libc-symbols.h $(sysdep-CPPFLAGS) \
$(CPPFLAGS-$(suffix $@)) \
$(foreach lib,$(libof-$(basename $(@F))) \
- $(libof-$(<F)) $(libof-$(@F)),$(CPPFLAGS-$(lib))) \
+ $(libof-$(<F)) $(libof-$(@F)),$(CPPFLAGS-$(lib))) \
$(CPPFLAGS-$(<F)) $(CPPFLAGS-$(@F)) $(CPPFLAGS-$(basename $(@F)))
override CFLAGS = -std=gnu99 $(gnu89-inline-CFLAGS) \
$(filter-out %frame-pointer,$(+cflags)) $(+gccwarn-c) \
@@ -992,7 +992,7 @@ endif
# emitted into sysd-rules. A sysdeps Makeconfig fragment can
# add its own special object file prefix to this list with e.g. foo-%:%
# to have foo-*.? compiled from *.? using $(foo-CPPFLAGS).
-sysd-rules-patterns := %:% rtld-%:% m_%:s_%
+sysd-rules-patterns := %:% rtld-%:rtld-% rtld-%:% m_%:s_%
# Let sysdeps/ subdirs contain a Makeconfig fragment for us to include here.
sysdep-makeconfigs := $(wildcard $(+sysdep_dirs:=/Makeconfig))
@@ -1001,8 +1001,8 @@ include $(sysdep-makeconfigs)
endif
# Compute just the target patterns. Makeconfig has set sysd-rules-patterns.
-sysd-rules-targets := $(foreach p,$(sysd-rules-patterns),\
- $(firstword $(subst :, ,$p)))
+sysd-rules-targets := $(sort $(foreach p,$(sysd-rules-patterns),\
+ $(firstword $(subst :, ,$p))))
endif # Makeconfig not yet included
diff --git a/libc/NEWS b/libc/NEWS
index 48efcbd52..cbe6e511b 100644
--- a/libc/NEWS
+++ b/libc/NEWS
@@ -1,4 +1,4 @@
-GNU C Library NEWS -- history of user-visible changes. 2010-10-13
+GNU C Library NEWS -- history of user-visible changes. 2010-11-5
Copyright (C) 1992-2009, 2010 Free Software Foundation, Inc.
See the end for copying conditions.
@@ -11,7 +11,7 @@ Version 2.13
3268, 7066, 10851, 11611, 11640, 11701, 11840, 11856, 11883, 11903, 11904,
11968, 11979, 12005, 12037, 12067, 12077, 12078, 12092, 12093, 12107, 12108,
- 12113
+ 12113, 12140, 12159, 12167, 12191
* New Linux interfaces: prlimit, prlimit64, fanotify_init, fanotify_mark
diff --git a/libc/elf/dl-deps.c b/libc/elf/dl-deps.c
index 92840e43f..e4dea1aa1 100644
--- a/libc/elf/dl-deps.c
+++ b/libc/elf/dl-deps.c
@@ -62,7 +62,7 @@ openaux (void *a)
{
struct openaux_args *args = (struct openaux_args *) a;
- args->aux = _dl_map_object (args->map, args->name, 0,
+ args->aux = _dl_map_object (args->map, args->name,
(args->map->l_type == lt_executable
? lt_library : args->map->l_type),
args->trace_mode, args->open_mode,
diff --git a/libc/elf/dl-load.c b/libc/elf/dl-load.c
index 0345efb93..697c1f171 100644
--- a/libc/elf/dl-load.c
+++ b/libc/elf/dl-load.c
@@ -798,22 +798,7 @@ lose (int code, int fd, const char *name, char *realname, struct link_map *l,
/* The file might already be closed. */
if (fd != -1)
(void) __close (fd);
- if (l != NULL)
- {
- /* We modify the list of loaded objects. */
- __rtld_lock_lock_recursive (GL(dl_load_write_lock));
- /* Remove the stillborn object from the list and free it. */
- assert (l->l_next == NULL);
- if (l->l_prev == NULL)
- /* No other module loaded. This happens only in the static library,
- or in rtld under --verify. */
- GL(dl_ns)[l->l_ns]._ns_loaded = NULL;
- else
- l->l_prev->l_next = NULL;
- --GL(dl_ns)[l->l_ns]._ns_nloaded;
- free (l);
- __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
- }
+ free (l);
free (realname);
if (r != NULL)
@@ -898,6 +883,9 @@ _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
never be unloaded. */
__close (fd);
+ /* Add the map for the mirrored object to the object list. */
+ _dl_add_to_namespace_list (l, nsid);
+
return l;
}
#endif
@@ -1492,6 +1480,9 @@ cannot enable executable stack as shared object requires");
add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
+ l->l_info[DT_SONAME]->d_un.d_val));
+ /* Now that the object is fully initialized add it to the object list. */
+ _dl_add_to_namespace_list (l, nsid);
+
#ifdef SHARED
/* Auditing checkpoint: we have a new object. */
if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
@@ -1812,7 +1803,7 @@ open_verify (const char *name, struct filebuf *fbp, struct link_map *loader,
if MAY_FREE_DIRS is true. */
static int
-open_path (const char *name, size_t namelen, int preloaded,
+open_path (const char *name, size_t namelen, int secure,
struct r_search_path_struct *sps, char **realname,
struct filebuf *fbp, struct link_map *loader, int whatcode,
bool *found_other_class)
@@ -1894,7 +1885,7 @@ open_path (const char *name, size_t namelen, int preloaded,
/* Remember whether we found any existing directory. */
here_any |= this_dir->status[cnt] != nonexisting;
- if (fd != -1 && __builtin_expect (preloaded, 0)
+ if (fd != -1 && __builtin_expect (secure, 0)
&& INTUSE(__libc_enable_secure))
{
/* This is an extra security effort to make sure nobody can
@@ -1963,7 +1954,7 @@ open_path (const char *name, size_t namelen, int preloaded,
struct link_map *
internal_function
-_dl_map_object (struct link_map *loader, const char *name, int preloaded,
+_dl_map_object (struct link_map *loader, const char *name,
int type, int trace_mode, int mode, Lmid_t nsid)
{
int fd;
@@ -2067,7 +2058,8 @@ _dl_map_object (struct link_map *loader, const char *name, int preloaded,
for (l = loader; l; l = l->l_loader)
if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
{
- fd = open_path (name, namelen, preloaded, &l->l_rpath_dirs,
+ fd = open_path (name, namelen, mode & __RTLD_SECURE,
+ &l->l_rpath_dirs,
&realname, &fb, loader, LA_SER_RUNPATH,
&found_other_class);
if (fd != -1)
@@ -2082,14 +2074,15 @@ _dl_map_object (struct link_map *loader, const char *name, int preloaded,
&& main_map != NULL && main_map->l_type != lt_loaded
&& cache_rpath (main_map, &main_map->l_rpath_dirs, DT_RPATH,
"RPATH"))
- fd = open_path (name, namelen, preloaded, &main_map->l_rpath_dirs,
+ fd = open_path (name, namelen, mode & __RTLD_SECURE,
+ &main_map->l_rpath_dirs,
&realname, &fb, loader ?: main_map, LA_SER_RUNPATH,
&found_other_class);
}
/* Try the LD_LIBRARY_PATH environment variable. */
if (fd == -1 && env_path_list.dirs != (void *) -1)
- fd = open_path (name, namelen, preloaded, &env_path_list,
+ fd = open_path (name, namelen, mode & __RTLD_SECURE, &env_path_list,
&realname, &fb,
loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded,
LA_SER_LIBPATH, &found_other_class);
@@ -2098,12 +2091,12 @@ _dl_map_object (struct link_map *loader, const char *name, int preloaded,
if (fd == -1 && loader != NULL
&& cache_rpath (loader, &loader->l_runpath_dirs,
DT_RUNPATH, "RUNPATH"))
- fd = open_path (name, namelen, preloaded,
+ fd = open_path (name, namelen, mode & __RTLD_SECURE,
&loader->l_runpath_dirs, &realname, &fb, loader,
LA_SER_RUNPATH, &found_other_class);
if (fd == -1
- && (__builtin_expect (! preloaded, 1)
+ && (__builtin_expect (! (mode & __RTLD_SECURE), 1)
|| ! INTUSE(__libc_enable_secure)))
{
/* Check the list of libraries in the file /etc/ld.so.cache,
@@ -2169,7 +2162,7 @@ _dl_map_object (struct link_map *loader, const char *name, int preloaded,
&& ((l = loader ?: GL(dl_ns)[nsid]._ns_loaded) == NULL
|| __builtin_expect (!(l->l_flags_1 & DF_1_NODEFLIB), 1))
&& rtld_search_dirs.dirs != (void *) -1)
- fd = open_path (name, namelen, preloaded, &rtld_search_dirs,
+ fd = open_path (name, namelen, mode & __RTLD_SECURE, &rtld_search_dirs,
&realname, &fb, l, LA_SER_DEFAULT, &found_other_class);
/* Add another newline when we are tracing the library loading. */
@@ -2214,7 +2207,7 @@ _dl_map_object (struct link_map *loader, const char *name, int preloaded,
have. */
static const Elf_Symndx dummy_bucket = STN_UNDEF;
- /* Enter the new object in the list of loaded objects. */
+ /* Allocate a new object map. */
if ((name_copy = local_strdup (name)) == NULL
|| (l = _dl_new_object (name_copy, name, type, loader,
mode, nsid)) == NULL)
@@ -2232,6 +2225,9 @@ _dl_map_object (struct link_map *loader, const char *name, int preloaded,
l->l_nbuckets = 1;
l->l_relocated = 1;
+ /* Enter the object in the object list. */
+ _dl_add_to_namespace_list (l, nsid);
+
return l;
}
else if (found_other_class)
diff --git a/libc/elf/dl-object.c b/libc/elf/dl-object.c
index fb20b308f..7a3602931 100644
--- a/libc/elf/dl-object.c
+++ b/libc/elf/dl-object.c
@@ -1,5 +1,5 @@
/* Storage management for the chain of loaded shared objects.
- Copyright (C) 1995-2002,2004,2006-2008,2009 Free Software Foundation, Inc.
+ Copyright (C) 1995-2002,2004,2006-2009,2010 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -26,16 +26,41 @@
#include <assert.h>
+/* Add the new link_map NEW to the end of the namespace list. */
+void
+internal_function
+_dl_add_to_namespace_list (struct link_map *new, Lmid_t nsid)
+{
+ /* We modify the list of loaded objects. */
+ __rtld_lock_lock_recursive (GL(dl_load_write_lock));
+
+ if (GL(dl_ns)[nsid]._ns_loaded != NULL)
+ {
+ struct link_map *l = GL(dl_ns)[nsid]._ns_loaded;
+ while (l->l_next != NULL)
+ l = l->l_next;
+ new->l_prev = l;
+ /* new->l_next = NULL; Would be necessary but we use calloc. */
+ l->l_next = new;
+ }
+ else
+ GL(dl_ns)[nsid]._ns_loaded = new;
+ ++GL(dl_ns)[nsid]._ns_nloaded;
+ new->l_serial = GL(dl_load_adds);
+ ++GL(dl_load_adds);
+
+ __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
+}
+
+
/* Allocate a `struct link_map' for a new object being loaded,
and enter it into the _dl_loaded list. */
-
struct link_map *
internal_function
_dl_new_object (char *realname, const char *libname, int type,
struct link_map *loader, int mode, Lmid_t nsid)
{
struct link_map *l;
- int idx;
size_t libname_len = strlen (libname) + 1;
struct link_map *new;
struct libname_list *newname;
@@ -93,31 +118,12 @@ _dl_new_object (char *realname, const char *libname, int type,
new->l_scope = new->l_scope_mem;
new->l_scope_max = sizeof (new->l_scope_mem) / sizeof (new->l_scope_mem[0]);
- /* We modify the list of loaded objects. */
- __rtld_lock_lock_recursive (GL(dl_load_write_lock));
-
/* Counter for the scopes we have to handle. */
- idx = 0;
+ int idx = 0;
if (GL(dl_ns)[nsid]._ns_loaded != NULL)
- {
- l = GL(dl_ns)[nsid]._ns_loaded;
- while (l->l_next != NULL)
- l = l->l_next;
- new->l_prev = l;
- /* new->l_next = NULL; Would be necessary but we use calloc. */
- l->l_next = new;
-
- /* Add the global scope. */
- new->l_scope[idx++] = &GL(dl_ns)[nsid]._ns_loaded->l_searchlist;
- }
- else
- GL(dl_ns)[nsid]._ns_loaded = new;
- ++GL(dl_ns)[nsid]._ns_nloaded;
- new->l_serial = GL(dl_load_adds);
- ++GL(dl_load_adds);
-
- __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
+ /* Add the global scope. */
+ new->l_scope[idx++] = &GL(dl_ns)[nsid]._ns_loaded->l_searchlist;
/* If we have no loader the new object acts as it. */
if (loader == NULL)
diff --git a/libc/elf/dl-open.c b/libc/elf/dl-open.c
index 610ceaedb..ee09f0ec5 100644
--- a/libc/elf/dl-open.c
+++ b/libc/elf/dl-open.c
@@ -221,38 +221,9 @@ dl_open_worker (void *a)
assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
- /* Maybe we have to expand a DST. */
- if (__builtin_expect (dst != NULL, 0))
- {
- size_t len = strlen (file);
-
- /* Determine how much space we need. We have to allocate the
- memory locally. */
- size_t required = DL_DST_REQUIRED (call_map, file, len,
- _dl_dst_count (dst, 0));
-
- /* Get space for the new file name. */
- char *new_file = (char *) alloca (required + 1);
-
- /* Generate the new file name. */
- _dl_dst_substitute (call_map, file, new_file, 0);
-
- /* If the substitution failed don't try to load. */
- if (*new_file == '\0')
- _dl_signal_error (0, "dlopen", NULL,
- N_("empty dynamic string token substitution"));
-
- /* Now we have a new file name. */
- file = new_file;
-
- /* It does not matter whether call_map is set even if we
- computed it only because of the DST. Since the path contains
- a slash the value is not used. See dl-load.c. */
- }
-
/* Load the named object. */
struct link_map *new;
- args->map = new = _dl_map_object (call_map, file, 0, lt_loaded, 0,
+ args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
mode | __RTLD_CALLMAP, args->nsid);
/* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
diff --git a/libc/elf/rtld-Rules b/libc/elf/rtld-Rules
index 9f31a560e..10c9452ce 100644
--- a/libc/elf/rtld-Rules
+++ b/libc/elf/rtld-Rules
@@ -1,6 +1,6 @@
# Subroutine makefile for compiling libc modules linked into dynamic linker.
-# Copyright (C) 2002, 2003, 2005, 2006, 2008 Free Software Foundation, Inc.
+# Copyright (C) 2002,2003,2005,2006,2008,2010 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
# The GNU C Library is free software; you can redistribute it and/or
@@ -56,7 +56,7 @@ $(objpfx)rtld-libc.a: $(foreach dir,$(rtld-subdirs),\
# Use the verbose option of ar and tar when not running silently.
ifeq "$(findstring s,$(MAKEFLAGS))" "" # if not -s
verbose := v
-else # -s
+else # -s
verbose :=
endif # not -s
@@ -93,6 +93,12 @@ else
# These are the basic compilation rules corresponding to the Makerules ones.
# The sysd-rules generated makefile already defines pattern rules for rtld-%
# targets built from sysdeps source files.
+$(objpfx)rtld-%.os: rtld-%.S $(before-compile)
+ $(compile-command.S) $(rtld-CPPFLAGS)
+$(objpfx)rtld-%.os: rtld-%.s $(before-compile)
+ $(compile-command.s) $(rtld-CPPFLAGS)
+$(objpfx)rtld-%.os: rtld-%.c $(before-compile)
+ $(compile-command.c) $(rtld-CPPFLAGS)
$(objpfx)rtld-%.os: %.S $(before-compile)
$(compile-command.S) $(rtld-CPPFLAGS)
$(objpfx)rtld-%.os: %.s $(before-compile)
@@ -101,6 +107,9 @@ $(objpfx)rtld-%.os: %.c $(before-compile)
$(compile-command.c) $(rtld-CPPFLAGS)
# The rules for generated source files.
+$(objpfx)rtld-%.os: $(objpfx)rtld-%.S $(before-compile); $(compile-command.S)
+$(objpfx)rtld-%.os: $(objpfx)rtld-%.s $(before-compile); $(compile-command.s)
+$(objpfx)rtld-%.os: $(objpfx)rtld-%.c $(before-compile); $(compile-command.c)
$(objpfx)rtld-%.os: $(objpfx)%.S $(before-compile); $(compile-command.S)
$(objpfx)rtld-%.os: $(objpfx)%.s $(before-compile); $(compile-command.s)
$(objpfx)rtld-%.os: $(objpfx)%.c $(before-compile); $(compile-command.c)
diff --git a/libc/elf/rtld.c b/libc/elf/rtld.c
index 4ca07f8ae..c714be101 100644
--- a/libc/elf/rtld.c
+++ b/libc/elf/rtld.c
@@ -589,7 +589,6 @@ struct map_args
/* Argument to map_doit. */
char *str;
struct link_map *loader;
- int is_preloaded;
int mode;
/* Return value of map_doit. */
struct link_map *map;
@@ -627,16 +626,17 @@ static void
map_doit (void *a)
{
struct map_args *args = (struct map_args *) a;
- args->map = _dl_map_object (args->loader, args->str,
- args->is_preloaded, lt_library, 0, args->mode,
- LM_ID_BASE);
+ args->map = _dl_map_object (args->loader, args->str, lt_library, 0,
+ args->mode, LM_ID_BASE);
}
static void
dlmopen_doit (void *a)
{
struct dlmopen_args *args = (struct dlmopen_args *) a;
- args->map = _dl_open (args->fname, RTLD_LAZY | __RTLD_DLOPEN | __RTLD_AUDIT,
+ args->map = _dl_open (args->fname,
+ (RTLD_LAZY | __RTLD_DLOPEN | __RTLD_AUDIT
+ | __RTLD_SECURE),
dl_main, LM_ID_NEWLM, _dl_argc, INTUSE(_dl_argv),
__environ);
}
@@ -806,8 +806,7 @@ do_preload (char *fname, struct link_map *main_map, const char *where)
args.str = fname;
args.loader = main_map;
- args.is_preloaded = 1;
- args.mode = 0;
+ args.mode = __RTLD_SECURE;
unsigned int old_nloaded = GL(dl_ns)[LM_ID_BASE]._ns_nloaded;
@@ -1054,7 +1053,6 @@ of this helper program; chances are you did not intend to run this program.\n\
args.str = rtld_progname;
args.loader = NULL;
- args.is_preloaded = 0;
args.mode = __RTLD_OPENEXEC;
(void) _dl_catch_error (&objname, &err_str, &malloced, map_doit,
&args);
@@ -1066,7 +1064,7 @@ of this helper program; chances are you did not intend to run this program.\n\
else
{
HP_TIMING_NOW (start);
- _dl_map_object (NULL, rtld_progname, 0, lt_library, 0,
+ _dl_map_object (NULL, rtld_progname, lt_library, 0,
__RTLD_OPENEXEC, LM_ID_BASE);
HP_TIMING_NOW (stop);
@@ -1110,11 +1108,15 @@ of this helper program; chances are you did not intend to run this program.\n\
main_map = _dl_new_object ((char *) "", "", lt_executable, NULL,
__RTLD_OPENEXEC, LM_ID_BASE);
assert (main_map != NULL);
- assert (main_map == GL(dl_ns)[LM_ID_BASE]._ns_loaded);
main_map->l_phdr = phdr;
main_map->l_phnum = phnum;
main_map->l_entry = *user_entry;
+ /* Even though the link map is not yet fully initialized we can add
+ it to the map list since there are no possible users running yet. */
+ _dl_add_to_namespace_list (main_map, LM_ID_BASE);
+ assert (main_map == GL(dl_ns)[LM_ID_BASE]._ns_loaded);
+
/* At this point we are in a bit of trouble. We would have to
fill in the values for l_dev and l_ino. But in general we
do not know where the file is. We also do not handle AT_EXECFD
@@ -1257,7 +1259,7 @@ of this helper program; chances are you did not intend to run this program.\n\
/* We were invoked directly, so the program might not have a
PT_INTERP. */
_dl_rtld_libname.name = GL(dl_rtld_map).l_name;
- /* _dl_rtld_libname.next = NULL; Already zero. */
+ /* _dl_rtld_libname.next = NULL; Already zero. */
GL(dl_rtld_map).l_libname = &_dl_rtld_libname;
}
else
@@ -1382,6 +1384,9 @@ of this helper program; chances are you did not intend to run this program.\n\
l->l_libname->name = memcpy (copy, dsoname, len);
}
+ /* Add the vDSO to the object list. */
+ _dl_add_to_namespace_list (l, LM_ID_BASE);
+
/* Rearrange the list so this DSO appears after rtld_map. */
assert (l->l_next == NULL);
assert (l->l_prev == main_map);
diff --git a/libc/include/dlfcn.h b/libc/include/dlfcn.h
index a67426df3..af92483f5 100644
--- a/libc/include/dlfcn.h
+++ b/libc/include/dlfcn.h
@@ -9,6 +9,7 @@
#define __RTLD_OPENEXEC 0x20000000
#define __RTLD_CALLMAP 0x10000000
#define __RTLD_AUDIT 0x08000000
+#define __RTLD_SECURE 0x04000000 /* Apply additional security checks. */
#define __LM_ID_CALLER -2
diff --git a/libc/malloc/malloc.c b/libc/malloc/malloc.c
index 53ee1ccb8..f7770a394 100644
--- a/libc/malloc/malloc.c
+++ b/libc/malloc/malloc.c
@@ -4850,7 +4850,7 @@ _int_free(mstate av, mchunkptr p)
}
if (__builtin_expect (perturb_byte, 0))
- free_perturb (chunk2mem(p), size - SIZE_SZ);
+ free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
set_fastchunks(av);
unsigned int idx = fastbin_index(size);
@@ -4954,7 +4954,7 @@ _int_free(mstate av, mchunkptr p)
}
if (__builtin_expect (perturb_byte, 0))
- free_perturb (chunk2mem(p), size - SIZE_SZ);
+ free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
/* consolidate backward */
if (!prev_inuse(p)) {
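
The two malloc hunks above change the perturb length from size - SIZE_SZ to size - 2 * SIZE_SZ (BZ #12140). Below is a minimal sketch of the arithmetic, assuming glibc's usual layout in which chunk2mem(p) points 2 * SIZE_SZ bytes into the chunk; the snippet is illustrative C, not allocator code.

#include <stdio.h>

/* Illustrative sketch: why _int_free now fills size - 2 * SIZE_SZ bytes.
   The fill starts at chunk2mem(p), 2 * SIZE_SZ bytes past the chunk start,
   so a length of size - 2 * SIZE_SZ ends exactly at the chunk boundary,
   while the old size - SIZE_SZ ran one word past it.  */
#define SIZE_SZ sizeof (size_t)

int main (void)
{
  size_t size = 64;                   /* hypothetical chunk size */
  size_t mem_offset = 2 * SIZE_SZ;    /* chunk2mem(p) - p */

  size_t old_end = mem_offset + (size - SIZE_SZ);      /* size + SIZE_SZ: past the chunk */
  size_t new_end = mem_offset + (size - 2 * SIZE_SZ);  /* size: exactly the chunk end */

  printf ("chunk end=%zu old fill end=%zu new fill end=%zu\n",
          size, old_end, new_end);
  return 0;
}
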
diff --git a/libc/math/math.h b/libc/math/math.h
index 3abf7ae9a..272cea719 100644
--- a/libc/math/math.h
+++ b/libc/math/math.h
@@ -265,7 +265,7 @@ enum
? __isnan (x) : __isnanl (x))
# endif
-/* Return nonzero value is X is positive or negative infinity. */
+/* Return nonzero value if X is positive or negative infinity. */
# ifdef __NO_LONG_DOUBLE_MATH
# define isinf(x) \
(sizeof (x) == sizeof (float) ? __isinff (x) : __isinf (x))
diff --git a/libc/po/da.po b/libc/po/da.po
index e823bfcc6..2923cf117 100644
--- a/libc/po/da.po
+++ b/libc/po/da.po
@@ -1,15 +1,14 @@
-# translation of libc-2.9.90.po to Danish
# Danish messages for GNU libc.
# Copyright (C) 1996, 2009 Free Software Foundation, Inc.
# This file is distributed under the same license as the glibc package.
#
-# Keld Simonsen <keld@dkuug.dk>, 2000-2002, 2009.
+# Keld Simonsen <keld@keldix.com>, 2000-2002, 2009-2010.
msgid ""
msgstr ""
-"Project-Id-Version: libc-2.9.90\n"
+"Project-Id-Version: libc-2.11.1\n"
"POT-Creation-Date: 2009-02-06 12:40-0800\n"
-"PO-Revision-Date: 2009-05-20 11:37+0200\n"
-"Last-Translator: Keld Simonsen <keld@dkuug.dk>\n"
+"PO-Revision-Date: 2010-11-01 10:37+0100\n"
+"Last-Translator: Keld Simonsen <keld@keldix.com>\n"
"Language-Team: Danish <dansk@dansk-gruppen.dk>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=ISO-8859-1\n"
@@ -122,7 +121,7 @@ msgid ""
"Generate message catalog.\vIf INPUT-FILE is -, input is read from standard input. If OUTPUT-FILE\n"
"is -, output is written to standard output.\n"
msgstr ""
-"Generér meddelelseskatalog.\\vHvis INDFIL er '-' lćses inddata fra standard ind.\n"
+"Generér meddelelseskatalog.\vHvis INDFIL er '-' lćses inddata fra standard ind.\n"
"Hvis UDFIL er '-' skrives uddata til standard ud.\n"
#: catgets/gencat.c:124
diff --git a/libc/string/Makefile b/libc/string/Makefile
index f511eb378..0d5366559 100644
--- a/libc/string/Makefile
+++ b/libc/string/Makefile
@@ -59,7 +59,7 @@ tests := tester inl-tester noinl-tester testcopy test-ffs \
bug-strncat1 bug-strspn1 bug-strpbrk1 tst-bswap \
tst-strtok tst-strfry \
bug-strtok1 $(addprefix test-,$(strop-tests)) \
- tst-strxfrm2 tst-endian tst-svc2 bug-strstr1
+ tst-strxfrm2 tst-endian tst-svc2 bug-strstr1 bug-strchr1
tests-$(OPTION_EGLIBC_ENVZ) += bug-envz1
tests-$(OPTION_EGLIBC_LOCALE_CODE) \
+= tst-strxfrm bug-strcoll1
diff --git a/libc/string/bug-strchr1.c b/libc/string/bug-strchr1.c
new file mode 100644
index 000000000..21155d8a7
--- /dev/null
+++ b/libc/string/bug-strchr1.c
@@ -0,0 +1,14 @@
+#include <stdio.h>
+#include <string.h>
+
+static int
+do_test (void)
+{
+ char s[] __attribute__((aligned(16))) = "\xff";
+ char *p = strchr (s, '\xfe');
+ printf ("%p\n", p);
+ return p != NULL;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
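
The new bug-strchr1.c test above covers BZ #12159, the fix to sysdeps/x86_64/multiarch/strchr.S. A scalar C illustration of the byte-broadcast step that the fix repairs follows; it is an assumed equivalent for explanation only, not the SSE code from the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: a vectorized strchr first replicates the search byte
   into every lane of a wide register, then compares whole blocks at once.
   A scalar equivalent of that broadcast step: */
static uint64_t broadcast_byte (unsigned char c)
{
  return (uint64_t) c * 0x0101010101010101ULL;  /* c copied into all 8 bytes */
}

int main (void)
{
  printf ("%016llx\n", (unsigned long long) broadcast_byte (0xfe));
  /* Prints fefefefefefefefe.  If the broadcast left other lanes unset, the
     per-lane comparisons could report the spurious match that bug-strchr1.c
     (searching for '\xfe' in "\xff") guards against.  */
  return 0;
}
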
diff --git a/libc/sysdeps/generic/ldsodefs.h b/libc/sysdeps/generic/ldsodefs.h
index ec1e57f88..e79252fe0 100644
--- a/libc/sysdeps/generic/ldsodefs.h
+++ b/libc/sysdeps/generic/ldsodefs.h
@@ -832,11 +832,9 @@ extern void _dl_receive_error (receiver_fct fct, void (*operate) (void *),
/* Open the shared object NAME and map in its segments.
LOADER's DT_RPATH is used in searching for NAME.
- If the object is already opened, returns its existing map.
- For preloaded shared objects PRELOADED is set to a non-zero
- value to allow additional security checks. */
+ If the object is already opened, returns its existing map. */
extern struct link_map *_dl_map_object (struct link_map *loader,
- const char *name, int preloaded,
+ const char *name,
int type, int trace_mode, int mode,
Lmid_t nsid)
internal_function attribute_hidden;
@@ -901,8 +899,11 @@ extern lookup_t _dl_lookup_symbol_x (const char *undef,
extern ElfW(Addr) _dl_symbol_value (struct link_map *map, const char *name)
internal_function;
-/* Allocate a `struct link_map' for a new object being loaded,
- and enter it into the _dl_main_map list. */
+/* Add the new link_map NEW to the end of the namespace list. */
+extern void _dl_add_to_namespace_list (struct link_map *new, Lmid_t nsid)
+ internal_function attribute_hidden;
+
+/* Allocate a `struct link_map' for a new object being loaded. */
extern struct link_map *_dl_new_object (char *realname, const char *libname,
int type, struct link_map *loader,
int mode, Lmid_t nsid)
diff --git a/libc/sysdeps/i386/i686/cacheinfo.c b/libc/sysdeps/i386/i686/cacheinfo.c
index f8b7f521c..363596172 100644
--- a/libc/sysdeps/i386/i686/cacheinfo.c
+++ b/libc/sysdeps/i386/i686/cacheinfo.c
@@ -1,7 +1,11 @@
#define __x86_64_data_cache_size __x86_data_cache_size
+#define __x86_64_raw_data_cache_size __x86_raw_data_cache_size
#define __x86_64_data_cache_size_half __x86_data_cache_size_half
+#define __x86_64_raw_data_cache_size_half __x86_raw_data_cache_size_half
#define __x86_64_shared_cache_size __x86_shared_cache_size
+#define __x86_64_raw_shared_cache_size __x86_raw_shared_cache_size
#define __x86_64_shared_cache_size_half __x86_shared_cache_size_half
+#define __x86_64_raw_shared_cache_size_half __x86_raw_shared_cache_size_half
#define DISABLE_PREFETCHW
#define DISABLE_PREFERRED_MEMORY_INSTRUCTION
diff --git a/libc/sysdeps/powerpc/dl-procinfo.c b/libc/sysdeps/powerpc/dl-procinfo.c
index 1c74c2a90..2ca76d8b7 100644
--- a/libc/sysdeps/powerpc/dl-procinfo.c
+++ b/libc/sysdeps/powerpc/dl-procinfo.c
@@ -68,7 +68,7 @@ PROCINFO_CLASS const char _dl_powerpc_cap_flags[25][10]
#if !defined PROCINFO_DECL && defined SHARED
._dl_powerpc_platforms
#else
-PROCINFO_CLASS const char _dl_powerpc_platforms[8][12]
+PROCINFO_CLASS const char _dl_powerpc_platforms[9][12]
#endif
#ifndef PROCINFO_DECL
= {
@@ -79,7 +79,8 @@ PROCINFO_CLASS const char _dl_powerpc_platforms[8][12]
[PPC_PLATFORM_POWER6] = "power6",
[PPC_PLATFORM_CELL_BE] = "ppc-cell-be",
[PPC_PLATFORM_POWER6X] = "power6x",
- [PPC_PLATFORM_POWER7] = "power7"
+ [PPC_PLATFORM_POWER7] = "power7",
+ [PPC_PLATFORM_PPCA2] = "ppca2"
}
#endif
#if !defined SHARED || defined PROCINFO_DECL
diff --git a/libc/sysdeps/powerpc/dl-procinfo.h b/libc/sysdeps/powerpc/dl-procinfo.h
index 254195a94..2ae35644a 100644
--- a/libc/sysdeps/powerpc/dl-procinfo.h
+++ b/libc/sysdeps/powerpc/dl-procinfo.h
@@ -31,7 +31,7 @@
#define HWCAP_IMPORTANT (PPC_FEATURE_HAS_ALTIVEC \
+ PPC_FEATURE_HAS_DFP)
-#define _DL_PLATFORMS_COUNT 8
+#define _DL_PLATFORMS_COUNT 9
#define _DL_FIRST_PLATFORM 32
/* Mask to filter out platforms. */
@@ -47,6 +47,7 @@
#define PPC_PLATFORM_CELL_BE 5
#define PPC_PLATFORM_POWER6X 6
#define PPC_PLATFORM_POWER7 7
+#define PPC_PLATFORM_PPCA2 8
static inline const char *
__attribute__ ((unused))
@@ -123,6 +124,10 @@ _dl_string_platform (const char *str)
GLRO(dl_powerpc_platforms)[PPC_PLATFORM_CELL_BE] + 3)
== 0)
return _DL_FIRST_PLATFORM + PPC_PLATFORM_CELL_BE;
+ else if (strcmp (str + 3,
+ GLRO(dl_powerpc_platforms)[PPC_PLATFORM_PPCA2] + 3)
+ == 0)
+ return _DL_FIRST_PLATFORM + PPC_PLATFORM_PPCA2;
}
return -1;
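
A small sketch of how the new "ppca2" table entry is matched by _dl_string_platform: by the time the comparison in the hunk above runs, the common "ppc" prefix has already been checked, so only the tail of the AT_PLATFORM string is compared. The snippet below is illustrative C with a hypothetical AT_PLATFORM value, not dynamic-linker code.

#include <stdio.h>
#include <string.h>

/* Illustrative sketch: matching AT_PLATFORM "ppca2" against the new
   _dl_powerpc_platforms[PPC_PLATFORM_PPCA2] entry.  Only the part after the
   shared "ppc" prefix is compared, mirroring the `str + 3` / `... + 3`
   arithmetic in the hunk above.  */
static const char platforms[9][12] = { [8] = "ppca2" };  /* PPC_PLATFORM_PPCA2 == 8 */

int main (void)
{
  const char *str = "ppca2";                            /* hypothetical AT_PLATFORM value */
  int match = strcmp (str + 3, platforms[8] + 3) == 0;  /* compares "a2" with "a2" */
  printf ("match=%d\n", match);                         /* prints match=1 */
  return 0;
}
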
diff --git a/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S b/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S
new file mode 100644
index 000000000..472f7a393
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S
@@ -0,0 +1,511 @@
+/* Optimized memcpy implementation for PowerPC A2.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Michael Brutman <brutman@us.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+#define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
+#define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
+
+ .machine a2
+EALIGN (BP_SYM (memcpy), 5, 0)
+ CALL_MCOUNT
+
+ dcbt 0,r4 /* Prefetch ONE SRC cacheline */
+ cmplwi cr1,r5,16 /* is size < 16 ? */
+ mr r6,r3 /* Copy dest reg to r6; */
+ blt+ cr1,L(shortcopy)
+
+
+ /* Big copy (16 bytes or more)
+
+ Figure out how far to the nearest quadword boundary, or if we are
+ on one already.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+ neg r8,r3 /* LS 4 bits = # bytes to 8-byte dest bdry */
+ clrlwi r8,r8,32-4 /* align to 16byte boundary */
+ sub r7,r4,r3 /* compute offset to src from dest */
+ cmplwi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
+ beq+ L(dst_aligned)
+
+
+
+ /* Destination is not aligned on quadword boundary. Get us to one.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ r7 - offset to src from dest
+ r8 - number of bytes to quadword boundary
+ */
+
+ mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
+ subf r5,r8,r5 /* adjust remaining len */
+
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte addr */
+ stb r0,0(r6)
+ addi r6,r6,1
+1:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte addr */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte addr */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+0,8f
+ lfdx r0,r7,r6 /* copy 8 byte addr */
+ stfd r0,0(r6)
+ addi r6,r6,8
+8:
+ add r4,r7,r6 /* update src addr */
+
+
+
+ /* Dest is quadword aligned now.
+
+ Lots of decisions to make. If we are copying less than a cache
+ line we won't be here long. If we are not on a cache line
+ boundary we need to get there. And then we need to figure out
+ how many cache lines ahead to pre-touch.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+
+ .align 4
+L(dst_aligned):
+
+
+#ifdef SHARED
+ mflr r0
+/* Establishes GOT addressability so we can load __cache_line_size
+ from static. This value was set from the aux vector during startup. */
+ bcl 20,31,1f
+1:
+ mflr r9
+ addis r9,r9,__cache_line_size-1b@ha
+ lwz r9,__cache_line_size-1b@l(r9)
+ mtlr r0
+#else
+/* Load __cache_line_size from static. This value was set from the
+ aux vector during startup. */
+ lis r9,__cache_line_size@ha
+ lwz r9,__cache_line_size@l(r9)
+#endif
+
+ cmplwi cr5, r9, 0
+ bne+ cr5,L(cachelineset)
+ li r9,64
+
+
+
+L(cachelineset):
+
+ addi r10,r9,-1
+
+ cmpw cr5,r5,r10 /* Less than a cacheline to go? */
+
+ neg r7,r6 /* How far to next cacheline bdy? */
+
+ addi r6,r6,-8 /* prepare for stdu */
+ cmpwi cr0,r9,128
+ addi r4,r4,-8 /* prepare for ldu */
+
+
+ ble+ cr5,L(lessthancacheline)
+
+ beq- cr0,L(big_lines) /* 128 byte line code */
+
+
+
+
+ /* More than a cacheline left to go, and using 64 byte cachelines */
+
+ clrlwi r7,r7,32-6 /* How far to next cacheline bdy? */
+
+ cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srwi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srwi r10,r5,6
+
+ cmplwi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,64+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+ dcbt r12,r4
+ addi r12,r12,64
+ bdnz L(prefetchSRC)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch):
+ mtctr r7
+
+ cmplwi cr1,r5,64 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrlwi r5,r5,32-6
+
+ beq cr6,L(cachelinealigned)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz L(aligntocacheline)
+
+
+ .align 4
+L(cachelinealigned): /* copy while cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <64 */
+
+L(outerloop):
+ cmpwi r11,0
+ mtctr r11
+ beq- L(endloop)
+
+ li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfdu fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfdu fp12, 0x40(r6)
+
+ bdnz L(loop)
+
+
+L(endloop):
+ cmpwi r10,0
+ beq- L(endloop2)
+ mtctr r10
+
+L(loop2): /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfdu fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfdu fp12, 0x40(r6)
+
+ bdnz L(loop2)
+L(endloop2):
+
+
+ .align 4
+L(lessthancacheline): /* Was there less than cache to do ? */
+ cmplwi cr0,r5,16
+ srwi r7,r5,4 /* divide size by 16 */
+ blt- L(do_lt16)
+ mtctr r7
+
+L(copy_remaining):
+ lfd fp9, 0x08(r4)
+ lfdu fp10, 0x10(r4)
+ stfd fp9, 0x08(r6)
+ stfdu fp10, 0x10(r6)
+ bdnz L(copy_remaining)
+
+L(do_lt16): /* less than 16 ? */
+ cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
+ beqlr+ /* no rest to copy */
+ addi r4,r4,8
+ addi r6,r6,8
+
+L(shortcopy): /* SIMPLE COPY to handle size =< 15 bytes */
+ mtcrf 0x01,r5
+ sub r7,r4,r6
+ bf- cr7*4+0,8f
+ lfdx fp9,r7,r6 /* copy 8 byte */
+ stfd fp9,0(r6)
+ addi r6,r6,8
+8:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+1:
+ blr
+
+
+
+
+
+ /* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+ clrlwi r7,r7,32-7 /* How far to next cacheline bdy? */
+
+ cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srwi r7,r7,4 /* How many qw to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srwi r10,r5,7
+
+ cmplwi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch_128)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,128+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch_128)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+ dcbt r12,r4
+ addi r12,r12,128
+ bdnz L(prefetchSRC_128)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch_128):
+ mtctr r7
+
+ cmplwi cr1,r5,128 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrlwi r5,r5,32-7
+
+ beq cr6,L(cachelinealigned_128)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz L(aligntocacheline_128)
+
+
+L(cachelinealigned_128): /* copy while cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <128 */
+
+L(outerloop_128):
+ cmpwi r11,0
+ mtctr r11
+ beq- L(endloop_128)
+
+ li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop_128): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+
+ bdnz L(loop_128)
+
+
+L(endloop_128):
+ cmpwi r10,0
+ beq- L(endloop2_128)
+ mtctr r10
+
+L(loop2_128): /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+ bdnz L(loop2_128)
+L(endloop2_128):
+
+ b L(lessthancacheline)
+
+
+END (BP_SYM (memcpy))
+libc_hidden_builtin_def (memcpy)
diff --git a/libc/sysdeps/powerpc/powerpc32/power7/mempcpy.S b/libc/sysdeps/powerpc/powerpc32/power7/mempcpy.S
new file mode 100644
index 000000000..5e0525645
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/power7/mempcpy.S
@@ -0,0 +1,471 @@
+/* Optimized mempcpy implementation for POWER7.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Luis Machado <luisgpm@br.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] __mempcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+ Returns 'dst' + 'len'. */
+
+ .machine power7
+EALIGN (BP_SYM (__mempcpy), 5, 0)
+ CALL_MCOUNT
+
+ stwu 1,-32(1)
+ cfi_adjust_cfa_offset(32)
+ stw 30,20(1)
+ cfi_offset(30,(20-32))
+ stw 31,24(1)
+ mr 30,3
+ cmplwi cr1,5,31
+ neg 0,3
+ cfi_offset(31,-8)
+ ble cr1,L(copy_LT_32) /* If move < 32 bytes use short move
+ code. */
+
+ andi. 11,3,7 /* Check alignment of DST. */
+ clrlwi 10,4,29 /* Check alignment of SRC. */
+ cmplw cr6,10,11 /* SRC and DST alignments match? */
+ mr 12,4
+ mr 31,5
+ bne cr6,L(copy_GE_32_unaligned)
+
+ srwi 9,5,3 /* Number of full quadwords remaining. */
+
+ beq L(copy_GE_32_aligned_cont)
+
+ clrlwi 0,0,29
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Get the SRC aligned to 8 bytes. */
+
+1: bf 31,2f
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: bf 30,4f
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: bf 29,0f
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+0:
+ clrlwi 10,12,29 /* Check alignment of SRC again. */
+ srwi 9,31,3 /* Number of full doublewords remaining. */
+
+L(copy_GE_32_aligned_cont):
+
+ clrlwi 11,31,29
+ mtcrf 0x01,9
+
+ srwi 8,31,5
+ cmplwi cr1,9,4
+ cmplwi cr6,11,0
+ mr 11,12
+
+ /* Copy 1~3 doublewords so the main loop starts
+ at a multiple of 32 bytes. */
+
+ bf 30,1f
+ lfd 6,0(12)
+ lfd 7,8(12)
+ addi 11,12,16
+ mtctr 8
+ stfd 6,0(3)
+ stfd 7,8(3)
+ addi 10,3,16
+ bf 31,4f
+ lfd 0,16(12)
+ stfd 0,16(3)
+ blt cr1,3f
+ addi 11,12,24
+ addi 10,3,24
+ b 4f
+
+ .align 4
+1: /* Copy 1 doubleword and set the counter. */
+ mr 10,3
+ mtctr 8
+ bf 31,4f
+ lfd 6,0(12)
+ addi 11,12,8
+ stfd 6,0(3)
+ addi 10,3,8
+
+ .align 4
+4: /* Main aligned copy loop. Copies 32-bytes at a time. */
+ lfd 6,0(11)
+ lfd 7,8(11)
+ lfd 8,16(11)
+ lfd 0,24(11)
+ addi 11,11,32
+
+ stfd 6,0(10)
+ stfd 7,8(10)
+ stfd 8,16(10)
+ stfd 0,24(10)
+ addi 10,10,32
+ bdnz 4b
+3:
+
+ /* Check for tail bytes. */
+
+ clrrwi 0,31,3
+ mtcrf 0x01,31
+ beq cr6,0f
+
+.L9:
+ add 3,3,0
+ add 12,12,0
+
+ /* At this point we have a tail of 0-7 bytes and we know that the
+ destination is doubleword-aligned. */
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+
+ /* Handle copies of 0~31 bytes. */
+ .align 4
+L(copy_LT_32):
+ cmplwi cr6,5,8
+ mr 12,4
+ mtcrf 0x01,5
+ ble cr6,L(copy_LE_8)
+
+ /* At least 9 bytes to go. */
+ neg 8,4
+ clrrwi 11,4,2
+ andi. 0,8,3
+ cmplwi cr1,5,16
+ mr 10,5
+ beq L(copy_LT_32_aligned)
+
+ /* Force 4-bytes alignment for SRC. */
+ mtocrf 0x01,0
+ subf 10,0,5
+2: bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: bf 31,L(end_4bytes_alignment)
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+
+ .align 4
+L(end_4bytes_alignment):
+ cmplwi cr1,10,16
+ mtcrf 0x01,10
+
+L(copy_LT_32_aligned):
+ /* At least 6 bytes to go, and SRC is word-aligned. */
+ blt cr1,8f
+
+ /* Copy 16 bytes. */
+ lwz 6,0(12)
+ lwz 7,4(12)
+ stw 6,0(3)
+ lwz 8,8(12)
+ stw 7,4(3)
+ lwz 6,12(12)
+ addi 12,12,16
+ stw 8,8(3)
+ stw 6,12(3)
+ addi 3,3,16
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2-3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ sth 6,0(3)
+ bf 31,0f
+ lbz 7,2(12)
+ stb 7,2(3)
+
+ /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ /* Handles copies of 0~8 bytes. */
+ .align 4
+L(copy_LE_8):
+ bne cr6,4f
+
+ /* Though we could've used lfd/stfd here, they are still
+ slow for unaligned cases. */
+
+ lwz 6,0(4)
+ lwz 7,4(4)
+ stw 6,0(3)
+ stw 7,4(3)
+
+ /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+4: /* Copies 4~7 bytes. */
+ bf 29,2b
+
+ lwz 6,0(4)
+ stw 6,0(3)
+ bf 30,5f
+ lhz 7,4(4)
+ sth 7,4(3)
+ bf 31,0f
+ lbz 8,6(4)
+ stb 8,6(3)
+
+ /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+5: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,4(4)
+ stb 6,4(3)
+
+0: /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+ SRC is not. Use aligned quadword loads from SRC, shifted to realign
+ the data, allowing for aligned DST stores. */
+ .align 4
+L(copy_GE_32_unaligned):
+ andi. 11,3,15 /* Check alignment of DST. */
+ clrlwi 0,0,28 /* Number of bytes until the 1st
+ quadword of DST. */
+ srwi 9,5,4 /* Number of full quadwords remaining. */
+
+ beq L(copy_GE_32_unaligned_cont)
+
+ /* SRC is not quadword aligned, get it aligned. */
+
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Vector instructions work best when proper alignment (16-bytes)
+ is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
+1: /* Copy 1 byte. */
+ bf 31,2f
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: /* Copy 2 bytes. */
+ bf 30,4f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: /* Copy 4 bytes. */
+ bf 29,8f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+8: /* Copy 8 bytes. */
+ bf 28,0f
+
+ lfd 6,0(12)
+ addi 12,12,8
+ stfd 6,0(3)
+ addi 3,3,8
+0:
+ clrlwi 10,12,28 /* Check alignment of SRC. */
+ srwi 9,31,4 /* Number of full quadwords remaining. */
+
+ /* The proper alignment is present, it is OK to copy the bytes now. */
+L(copy_GE_32_unaligned_cont):
+
+ /* Setup two indexes to speed up the indexed vector operations. */
+ clrlwi 11,31,28
+ li 6,16 /* Index for 16-bytes offsets. */
+ li 7,32 /* Index for 32-bytes offsets. */
+ cmplwi cr1,11,0
+ srwi 8,31,5 /* Setup the loop counter. */
+ mr 10,3
+ mr 11,12
+ mtcrf 0x01,9
+ cmplwi cr6,9,1
+ lvsl 5,0,12
+ lvx 3,0,12
+ bf 31,L(setup_unaligned_loop)
+
+ /* Copy another 16 bytes to align to 32-bytes due to the loop . */
+ lvx 4,12,6
+ vperm 6,3,4,5
+ addi 11,12,16
+ addi 10,3,16
+ stvx 6,0,3
+ vor 3,4,4
+
+L(setup_unaligned_loop):
+ mtctr 8
+ ble cr6,L(end_unaligned_loop)
+
+ /* Copy 32 bytes at a time using vector instructions. */
+ .align 4
+L(unaligned_loop):
+
+ /* Note: vr6/vr10 may contain data that was already copied,
+ but in order to get proper alignment, we may have to copy
+ some portions again. This is faster than having unaligned
+ vector instructions though. */
+
+ lvx 4,11,6 /* vr4 = r11+16. */
+ vperm 6,3,4,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr6. */
+ lvx 3,11,7 /* vr3 = r11+32. */
+ vperm 10,4,3,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr10. */
+ addi 11,11,32
+ stvx 6,0,10
+ stvx 10,10,6
+ addi 10,10,32
+
+ bdnz L(unaligned_loop)
+
+ .align 4
+L(end_unaligned_loop):
+
+ /* Check for tail bytes. */
+ clrrwi 0,31,4
+ mtcrf 0x01,31
+ beq cr1,0f
+
+ add 3,3,0
+ add 12,12,0
+
+ /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2~3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+
+END (BP_SYM (__mempcpy))
+libc_hidden_def (BP_SYM (__mempcpy))
+weak_alias (BP_SYM (__mempcpy), BP_SYM (mempcpy))
+libc_hidden_builtin_def (mempcpy)
diff --git a/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S b/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S
new file mode 100644
index 000000000..ac95171aa
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S
@@ -0,0 +1,501 @@
+/* Optimized memcpy implementation for PowerPC A2.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Michael Brutman <brutman@us.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+#define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
+#define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
+
+ .section ".toc","aw"
+.LC0:
+ .tc __cache_line_size[TC],__cache_line_size
+ .section ".text"
+ .align 2
+
+
+ .machine a2
+EALIGN (BP_SYM (memcpy), 5, 0)
+ CALL_MCOUNT 3
+
+ dcbt 0,r4 /* Prefetch ONE SRC cacheline */
+ cmpldi cr1,r5,16 /* is size < 16 ? */
+ mr r6,r3 /* Copy dest reg to r6; */
+ blt+ cr1,L(shortcopy)
+
+
+ /* Big copy (16 bytes or more)
+
+ Figure out how far to the nearest quadword boundary, or if we are
+ on one already. Also get the cache line size.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+ neg r8,r3 /* LS 4 bits = # bytes to 8-byte dest bdry */
+ ld r9,.LC0@toc(r2) /* Get cache line size (part 1) */
+ clrldi r8,r8,64-4 /* align to 16byte boundary */
+ sub r7,r4,r3 /* compute offset to src from dest */
+ lwz r9,0(r9) /* Get cache line size (part 2) */
+ cmpldi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
+ addi r10,r9,-1 /* Cache line mask */
+ beq+ L(dst_aligned)
+
+
+
+ /* Destination is not aligned on quadword boundary. Get us to one.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ r7 - offset to src from dest
+ r8 - number of bytes to quadword boundary
+ */
+
+ mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
+ subf r5,r8,r5 /* adjust remaining len */
+
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte addr */
+ stb r0,0(r6)
+ addi r6,r6,1
+1:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte addr */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte addr */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+0,8f
+ ldx r0,r7,r6 /* copy 8 byte addr */
+ std r0,0(r6)
+ addi r6,r6,8
+8:
+ add r4,r7,r6 /* update src addr */
+
+
+
+ /* Dest is quadword aligned now.
+
+ Lots of decisions to make. If we are copying less than a cache
+ line we won't be here long. If we are not on a cache line
+ boundary we need to get there. And then we need to figure out
+ how many cache lines ahead to pre-touch.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+
+ .align 4
+L(dst_aligned):
+
+
+ cmpd cr5,r5,r10 /* Less than a cacheline to go? */
+
+ neg r7,r6 /* How far to next cacheline bdy? */
+
+ addi r6,r6,-8 /* prepare for stdu */
+ cmpdi cr0,r9,128
+ addi r4,r4,-8 /* prepare for ldu */
+
+
+ ble+ cr5,L(lessthancacheline)
+
+ beq- cr0,L(big_lines) /* 128 byte line code */
+
+
+
+ /* More than a cacheline left to go, and using 64 byte cachelines */
+
+ clrldi r7,r7,64-6 /* How far to next cacheline bdy? */
+
+ cmpldi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srdi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srdi r10,r5,6
+
+ cmpldi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmpldi r10,PREFETCH_AHEAD
+ li r12,64+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+ dcbt r12,r4
+ addi r12,r12,64
+ bdnz L(prefetchSRC)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch):
+ mtctr r7
+
+ cmpldi cr1,r5,64 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrldi r5,r5,64-6
+
+ beq cr6,L(cachelinealigned)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+ ld r9,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r9,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(aligntocacheline)
+
+
+ .align 4
+L(cachelinealigned):	/* copy whole cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <64 */
+
+L(outerloop):
+ cmpdi r11,0
+ mtctr r11
+ beq- L(endloop)
+
+ li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ ld r9, 0x08(r4)
+ dcbz r11,r6
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ addi r4, r4,0x40
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ stdu r0, 0x40(r6)
+
+ bdnz L(loop)
+
+
+L(endloop):
+ cmpdi r10,0
+ beq- L(endloop2)
+ mtctr r10
+
+L(loop2): /* Copy aligned body */
+ ld r9, 0x08(r4)
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ addi r4, r4,0x40
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ stdu r0, 0x40(r6)
+
+ bdnz L(loop2)
+L(endloop2):
+
+
+ .align 4
+L(lessthancacheline):	/* Less than a cache line left to copy? */
+ cmpldi cr0,r5,16
+ srdi r7,r5,4 /* divide size by 16 */
+ blt- L(do_lt16)
+ mtctr r7
+
+L(copy_remaining):
+ ld r8,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r8,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(copy_remaining)
+
+L(do_lt16): /* less than 16 ? */
+ cmpldi cr0,r5,0 /* copy remaining bytes (0-15) */
+ beqlr+ /* no rest to copy */
+ addi r4,r4,8
+ addi r6,r6,8
+
+L(shortcopy):	/* SIMPLE COPY to handle size <= 15 bytes */
+ mtcrf 0x01,r5
+ sub r7,r4,r6
+ bf- cr7*4+0,8f
+ ldx r0,r7,r6 /* copy 8 byte */
+ std r0,0(r6)
+ addi r6,r6,8
+8:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+1:
+ blr
+
+
+
+
+
+ /* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+ clrldi r7,r7,64-7 /* How far to next cacheline bdy? */
+
+ cmpldi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srdi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srdi r10,r5,7
+
+ cmpldi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch_128)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmpldi r10,PREFETCH_AHEAD
+ li r12,128+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch_128)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+ dcbt r12,r4
+ addi r12,r12,128
+ bdnz L(prefetchSRC_128)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch_128):
+ mtctr r7
+
+ cmpldi cr1,r5,128 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrldi r5,r5,64-7
+
+ beq cr6,L(cachelinealigned_128)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+ ld r9,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r9,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(aligntocacheline_128)
+
+
+L(cachelinealigned_128):	/* copy whole cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <128 */
+
+L(outerloop_128):
+ cmpdi r11,0
+ mtctr r11
+ beq- L(endloop_128)
+
+ li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop_128): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ ld r9, 0x08(r4)
+ dcbz r11,r6
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ std r0, 0x40(r6)
+ ld r9, 0x48(r4)
+ ld r7, 0x50(r4)
+ ld r8, 0x58(r4)
+ ld r0, 0x60(r4)
+ std r9, 0x48(r6)
+ std r7, 0x50(r6)
+ std r8, 0x58(r6)
+ std r0, 0x60(r6)
+ ld r9, 0x68(r4)
+ ld r7, 0x70(r4)
+ ld r8, 0x78(r4)
+ ld r0, 0x80(r4)
+ addi r4, r4,0x80
+ std r9, 0x68(r6)
+ std r7, 0x70(r6)
+ std r8, 0x78(r6)
+ stdu r0, 0x80(r6)
+
+ bdnz L(loop_128)
+
+
+L(endloop_128):
+ cmpdi r10,0
+ beq- L(endloop2_128)
+ mtctr r10
+
+L(loop2_128): /* Copy aligned body */
+ ld r9, 0x08(r4)
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ std r0, 0x40(r6)
+ ld r9, 0x48(r4)
+ ld r7, 0x50(r4)
+ ld r8, 0x58(r4)
+ ld r0, 0x60(r4)
+ std r9, 0x48(r6)
+ std r7, 0x50(r6)
+ std r8, 0x58(r6)
+ std r0, 0x60(r6)
+ ld r9, 0x68(r4)
+ ld r7, 0x70(r4)
+ ld r8, 0x78(r4)
+ ld r0, 0x80(r4)
+ addi r4, r4,0x80
+ std r9, 0x68(r6)
+ std r7, 0x70(r6)
+ std r8, 0x78(r6)
+ stdu r0, 0x80(r6)
+
+ bdnz L(loop2_128)
+L(endloop2_128):
+
+ b L(lessthancacheline)
+
+
+END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)
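
The A2 memcpy above follows a common pattern: a bit-tested head copy brings the destination to a 16-byte boundary, the main loops then prefetch the source PREFETCH_AHEAD cache lines ahead with dcbt and zero the destination ZERO_AHEAD lines ahead with dcbz, and a short tail path finishes the remainder. As a rough C sketch of just the head-alignment step (illustrative only; the helper name is made up, and it assumes, as the assembly does at this point, that at least 16 bytes remain):

#include <stdint.h>
#include <string.h>

/* Sketch of the mtcrf/bf head copy: 'head' is the distance to the next
   16-byte destination boundary; each low bit selects one fixed-size copy
   (1, 2, 4, then 8 bytes), in the same order the branches above test.  */
static void align_dst_head (unsigned char **dst, const unsigned char **src,
                            size_t *len)
{
  size_t head = (0 - (uintptr_t) *dst) & 15;  /* bytes to 16-byte boundary */
  *len -= head;                               /* assumes *len >= 16 */
  if (head & 1) { memcpy (*dst, *src, 1); *dst += 1; *src += 1; }
  if (head & 2) { memcpy (*dst, *src, 2); *dst += 2; *src += 2; }
  if (head & 4) { memcpy (*dst, *src, 4); *dst += 4; *src += 4; }
  if (head & 8) { memcpy (*dst, *src, 8); *dst += 8; *src += 8; }
}

The small fixed-size memcpy calls stand in for the lbzx/lhzx/lwzx/ldx pairs; the real code also keeps the source reachable through a dest-relative offset so only one pointer has to be advanced.
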
diff --git a/libc/sysdeps/powerpc/powerpc64/power7/mempcpy.S b/libc/sysdeps/powerpc/powerpc64/power7/mempcpy.S
new file mode 100644
index 000000000..09c08ab51
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc64/power7/mempcpy.S
@@ -0,0 +1,458 @@
+/* Optimized mempcpy implementation for POWER7.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Luis Machado <luisgpm@br.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+
+/* __ptr_t [r3] __mempcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+ Returns 'dst' + 'len'. */
+
+ .machine power7
+EALIGN (BP_SYM (__mempcpy), 5, 0)
+ CALL_MCOUNT 3
+
+ cmpldi cr1,5,31
+ neg 0,3
+ std 3,-16(1)
+ std 31,-8(1)
+ cfi_offset(31,-8)
+ ble cr1,L(copy_LT_32) /* If move < 32 bytes use short move
+ code. */
+
+ andi. 11,3,7 /* Check alignment of DST. */
+
+
+ clrldi 10,4,61 /* Check alignment of SRC. */
+ cmpld cr6,10,11 /* SRC and DST alignments match? */
+ mr 12,4
+ mr 31,5
+ bne cr6,L(copy_GE_32_unaligned)
+
+	srdi    9,5,3	      /* Number of full doublewords remaining.  */
+
+ beq L(copy_GE_32_aligned_cont)
+
+ clrldi 0,0,61
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Get the SRC aligned to 8 bytes. */
+
+1: bf 31,2f
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: bf 30,4f
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: bf 29,0f
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+0:
+ clrldi 10,12,61 /* Check alignment of SRC again. */
+ srdi 9,31,3 /* Number of full doublewords remaining. */
+
+L(copy_GE_32_aligned_cont):
+
+ clrldi 11,31,61
+ mtcrf 0x01,9
+
+ srdi 8,31,5
+ cmpldi cr1,9,4
+ cmpldi cr6,11,0
+ mr 11,12
+
+ /* Copy 1~3 doublewords so the main loop starts
+ at a multiple of 32 bytes. */
+
+ bf 30,1f
+ ld 6,0(12)
+ ld 7,8(12)
+ addi 11,12,16
+ mtctr 8
+ std 6,0(3)
+ std 7,8(3)
+ addi 10,3,16
+ bf 31,4f
+ ld 0,16(12)
+ std 0,16(3)
+ blt cr1,3f
+ addi 11,12,24
+ addi 10,3,24
+ b 4f
+
+ .align 4
+1: /* Copy 1 doubleword and set the counter. */
+ mr 10,3
+ mtctr 8
+ bf 31,4f
+ ld 6,0(12)
+ addi 11,12,8
+ std 6,0(3)
+ addi 10,3,8
+
+ /* Main aligned copy loop. Copies 32-bytes at a time. */
+ .align 4
+4:
+ ld 6,0(11)
+ ld 7,8(11)
+ ld 8,16(11)
+ ld 0,24(11)
+ addi 11,11,32
+
+ std 6,0(10)
+ std 7,8(10)
+ std 8,16(10)
+ std 0,24(10)
+ addi 10,10,32
+ bdnz 4b
+3:
+
+ /* Check for tail bytes. */
+ rldicr 0,31,0,60
+ mtcrf 0x01,31
+ beq cr6,0f
+
+.L9:
+ add 3,3,0
+ add 12,12,0
+
+ /* At this point we have a tail of 0-7 bytes and we know that the
+ destination is doubleword-aligned. */
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ ld 31,-8(1)
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ /* Handle copies of 0~31 bytes. */
+ .align 4
+L(copy_LT_32):
+ cmpldi cr6,5,8
+ mr 12,4
+ mtcrf 0x01,5
+ ble cr6,L(copy_LE_8)
+
+ /* At least 9 bytes to go. */
+ neg 8,4
+ clrrdi 11,4,2
+ andi. 0,8,3
+ cmpldi cr1,5,16
+ mr 10,5
+ beq L(copy_LT_32_aligned)
+
+	/* Force 4-byte alignment for SRC.  */
+ mtocrf 0x01,0
+ subf 10,0,5
+2: bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: bf 31,L(end_4bytes_alignment)
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+
+ .align 4
+L(end_4bytes_alignment):
+ cmpldi cr1,10,16
+ mtcrf 0x01,10
+
+L(copy_LT_32_aligned):
+ /* At least 6 bytes to go, and SRC is word-aligned. */
+ blt cr1,8f
+
+ /* Copy 16 bytes. */
+ lwz 6,0(12)
+ lwz 7,4(12)
+ stw 6,0(3)
+ lwz 8,8(12)
+ stw 7,4(3)
+ lwz 6,12(12)
+ addi 12,12,16
+ stw 8,8(3)
+ stw 6,12(3)
+ addi 3,3,16
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2-3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ sth 6,0(3)
+ bf 31,0f
+ lbz 7,2(12)
+ stb 7,2(3)
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ .align 4
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ /* Handles copies of 0~8 bytes. */
+ .align 4
+L(copy_LE_8):
+ bne cr6,4f
+
+ /* Though we could've used ld/std here, they are still
+ slow for unaligned cases. */
+
+ lwz 6,0(4)
+ lwz 7,4(4)
+ stw 6,0(3)
+ stw 7,4(3)
+ ld 3,-16(1) /* Return DST + LEN pointer. */
+ add 3,3,5
+ blr
+
+ .align 4
+4: /* Copies 4~7 bytes. */
+ bf 29,2b
+
+ lwz 6,0(4)
+ stw 6,0(3)
+ bf 30,5f
+ lhz 7,4(4)
+ sth 7,4(3)
+ bf 31,0f
+ lbz 8,6(4)
+ stb 8,6(3)
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ .align 4
+5: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,4(4)
+ stb 6,4(3)
+
+0: /* Return DST + LEN pointer. */
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+ SRC is not. Use aligned quadword loads from SRC, shifted to realign
+ the data, allowing for aligned DST stores. */
+ .align 4
+L(copy_GE_32_unaligned):
+ clrldi 0,0,60 /* Number of bytes until the 1st
+ quadword. */
+ andi. 11,3,15 /* Check alignment of DST (against
+ quadwords). */
+ srdi 9,5,4 /* Number of full quadwords remaining. */
+
+ beq L(copy_GE_32_unaligned_cont)
+
+	/* DST is not quadword aligned, get it aligned.  */
+
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Vector instructions work best when proper alignment (16-bytes)
+ is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
+1: /* Copy 1 byte. */
+ bf 31,2f
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: /* Copy 2 bytes. */
+ bf 30,4f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: /* Copy 4 bytes. */
+ bf 29,8f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+8: /* Copy 8 bytes. */
+ bf 28,0f
+
+ ld 6,0(12)
+ addi 12,12,8
+ std 6,0(3)
+ addi 3,3,8
+0:
+ clrldi 10,12,60 /* Check alignment of SRC. */
+ srdi 9,31,4 /* Number of full quadwords remaining. */
+
+ /* The proper alignment is present, it is OK to copy the bytes now. */
+L(copy_GE_32_unaligned_cont):
+
+ /* Setup two indexes to speed up the indexed vector operations. */
+ clrldi 11,31,60
+ li 6,16 /* Index for 16-bytes offsets. */
+ li 7,32 /* Index for 32-bytes offsets. */
+ cmpldi cr1,11,0
+ srdi 8,31,5 /* Setup the loop counter. */
+ mr 10,3
+ mr 11,12
+ mtcrf 0x01,9
+ cmpldi cr6,9,1
+ lvsl 5,0,12
+ lvx 3,0,12
+ bf 31,L(setup_unaligned_loop)
+
+	/* Copy one extra quadword when the count is odd; the loop below
+	   moves 32 bytes per iteration.  */
+ lvx 4,12,6
+ vperm 6,3,4,5
+ addi 11,12,16
+ addi 10,3,16
+ stvx 6,0,3
+ vor 3,4,4
+
+L(setup_unaligned_loop):
+ mtctr 8
+ ble cr6,L(end_unaligned_loop)
+
+ /* Copy 32 bytes at a time using vector instructions. */
+ .align 4
+L(unaligned_loop):
+
+ /* Note: vr6/vr10 may contain data that was already copied,
+ but in order to get proper alignment, we may have to copy
+ some portions again. This is faster than having unaligned
+ vector instructions though. */
+
+ lvx 4,11,6 /* vr4 = r11+16. */
+ vperm 6,3,4,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr6. */
+ lvx 3,11,7 /* vr3 = r11+32. */
+ vperm 10,4,3,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr10. */
+ addi 11,11,32
+ stvx 6,0,10
+ stvx 10,10,6
+ addi 10,10,32
+
+ bdnz L(unaligned_loop)
+
+ .align 4
+L(end_unaligned_loop):
+
+ /* Check for tail bytes. */
+ rldicr 0,31,0,59
+ mtcrf 0x01,31
+ beq cr1,0f
+
+ add 3,3,0
+ add 12,12,0
+
+ /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2~3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ ld 31,-8(1)
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+END_GEN_TB (BP_SYM (__mempcpy),TB_TOCLESS)
+libc_hidden_def (BP_SYM (__mempcpy))
+weak_alias (BP_SYM (__mempcpy), BP_SYM (mempcpy))
+libc_hidden_builtin_def (mempcpy)
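
For reference, mempcpy differs from memcpy only in its return value; the "ld 3,-16(1)" / "add 3,3,5" sequences in the epilogues above reconstruct it from the saved dst and len. The semantics amount to this trivial C definition (hypothetical name, shown only to pin down what the assembly returns):

#include <string.h>

/* mempcpy semantics: copy n bytes, return the end of the destination.  */
static void *my_mempcpy (void *dst, const void *src, size_t n)
{
  memcpy (dst, src, n);
  return (char *) dst + n;
}
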
diff --git a/libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies b/libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies
new file mode 100644
index 000000000..6d72414e5
--- /dev/null
+++ b/libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/a2/fpu
+powerpc/powerpc32/a2
diff --git a/libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies b/libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies
new file mode 100644
index 000000000..39b19e9c1
--- /dev/null
+++ b/libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/a2/fpu
+powerpc/powerpc64/a2
diff --git a/libc/sysdeps/unix/sysv/linux/ttyname.c b/libc/sysdeps/unix/sysv/linux/ttyname.c
index 69af6adc6..6cec3a901 100644
--- a/libc/sysdeps/unix/sysv/linux/ttyname.c
+++ b/libc/sysdeps/unix/sysv/linux/ttyname.c
@@ -1,4 +1,5 @@
-/* Copyright (C) 1991,92,93,1996-2002,2006,2009 Free Software Foundation, Inc.
+/* Copyright (C) 1991-1993,1996-2002,2006,2009,2010
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -131,6 +132,9 @@ ttyname (int fd)
if (__builtin_expect (__tcgetattr (fd, &term) < 0, 0))
return NULL;
+ if (__fxstat64 (_STAT_VER, fd, &st) < 0)
+ return NULL;
+
/* We try using the /proc filesystem. */
*_fitoa_word (fd, __stpcpy (procname, "/proc/self/fd/"), 10, 0) = '\0';
@@ -161,13 +165,32 @@ ttyname (int fd)
{
if ((size_t) len >= buflen)
return NULL;
+
+#define UNREACHABLE_LEN strlen ("(unreachable)")
+ if (len > UNREACHABLE_LEN
+ && memcmp (ttyname_buf, "(unreachable)", UNREACHABLE_LEN) == 0)
+ {
+ memmove (ttyname_buf, ttyname_buf + UNREACHABLE_LEN,
+ len - UNREACHABLE_LEN);
+ len -= UNREACHABLE_LEN;
+ }
+
/* readlink need not terminate the string. */
ttyname_buf[len] = '\0';
- return ttyname_buf;
- }
- if (__fxstat64 (_STAT_VER, fd, &st) < 0)
- return NULL;
+ /* Verify readlink result, fall back on iterating through devices. */
+ if (ttyname_buf[0] == '/'
+ && __xstat64 (_STAT_VER, ttyname_buf, &st1) == 0
+#ifdef _STATBUF_ST_RDEV
+ && S_ISCHR (st1.st_mode)
+ && st1.st_rdev == st.st_rdev
+#else
+ && st1.st_ino == st.st_ino
+ && st1.st_dev == st.st_dev
+#endif
+ )
+ return ttyname_buf;
+ }
if (__xstat64 (_STAT_VER, "/dev/pts", &st1) == 0 && S_ISDIR (st1.st_mode))
{
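
Both ttyname and __ttyname_r now apply the same two fixes: strip the "(unreachable)" prefix the kernel may prepend to a /proc/self/fd link target that is not reachable from the caller's root, and accept the readlink result only if it actually names the same character device as the descriptor. A hedged C sketch of that check (helper name hypothetical, plain stat used in place of the internal __xstat64 wrappers, and only the st_rdev branch of the #ifdef shown):

#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Return nonzero if 'buf' (a readlink result of 'len' bytes, not yet
   NUL-terminated) names the same character device as *fd_st.  */
static int link_names_same_tty (char *buf, ssize_t len, const struct stat *fd_st)
{
  static const char unreachable[] = "(unreachable)";
  const size_t ulen = sizeof unreachable - 1;
  struct stat st1;

  /* Drop the "(unreachable)" marker so the path checks below can work.  */
  if ((size_t) len > ulen && memcmp (buf, unreachable, ulen) == 0)
    {
      memmove (buf, buf + ulen, len - ulen);
      len -= ulen;
    }
  buf[len] = '\0';   /* readlink need not terminate the string */

  return buf[0] == '/'
         && stat (buf, &st1) == 0
         && S_ISCHR (st1.st_mode)
         && st1.st_rdev == fd_st->st_rdev;
}

If the verification fails, both functions now fall through to the existing code that iterates over /dev/pts and /dev instead of returning a stale or foreign path.
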
diff --git a/libc/sysdeps/unix/sysv/linux/ttyname_r.c b/libc/sysdeps/unix/sysv/linux/ttyname_r.c
index cef8624dc..2fa750347 100644
--- a/libc/sysdeps/unix/sysv/linux/ttyname_r.c
+++ b/libc/sysdeps/unix/sysv/linux/ttyname_r.c
@@ -1,4 +1,5 @@
-/* Copyright (C) 1991,92,93,1995-2001,2003,2006 Free Software Foundation, Inc.
+/* Copyright (C) 1991-1993,1995-2001,2003,2006,2010
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -122,6 +123,9 @@ __ttyname_r (int fd, char *buf, size_t buflen)
if (__builtin_expect (__tcgetattr (fd, &term) < 0, 0))
return errno;
+ if (__fxstat64 (_STAT_VER, fd, &st) < 0)
+ return errno;
+
/* We try using the /proc filesystem. */
*_fitoa_word (fd, __stpcpy (procname, "/proc/self/fd/"), 10, 0) = '\0';
@@ -145,12 +149,30 @@ __ttyname_r (int fd, char *buf, size_t buflen)
#endif
, 1))
{
+#define UNREACHABLE_LEN strlen ("(unreachable)")
+ if (ret > UNREACHABLE_LEN
+ && memcmp (buf, "(unreachable)", UNREACHABLE_LEN) == 0)
+ {
+ memmove (buf, buf + UNREACHABLE_LEN, ret - UNREACHABLE_LEN);
+ ret -= UNREACHABLE_LEN;
+ }
+
+ /* readlink need not terminate the string. */
buf[ret] = '\0';
- return 0;
- }
- if (__fxstat64 (_STAT_VER, fd, &st) < 0)
- return errno;
+ /* Verify readlink result, fall back on iterating through devices. */
+ if (buf[0] == '/'
+ && __xstat64 (_STAT_VER, buf, &st1) == 0
+#ifdef _STATBUF_ST_RDEV
+ && S_ISCHR (st1.st_mode)
+ && st1.st_rdev == st.st_rdev
+#else
+ && st1.st_ino == st.st_ino
+ && st1.st_dev == st.st_dev
+#endif
+ )
+ return 0;
+ }
/* Prepare the result buffer. */
memcpy (buf, "/dev/pts/", sizeof ("/dev/pts/"));
diff --git a/libc/sysdeps/x86_64/cacheinfo.c b/libc/sysdeps/x86_64/cacheinfo.c
index 54220379e..eae54e725 100644
--- a/libc/sysdeps/x86_64/cacheinfo.c
+++ b/libc/sysdeps/x86_64/cacheinfo.c
@@ -455,13 +455,21 @@ __cache_sysconf (int name)
/* Data cache size for use in memory and string routines, typically
- L1 size. */
+ L1 size, rounded to multiple of 256 bytes. */
long int __x86_64_data_cache_size_half attribute_hidden = 32 * 1024 / 2;
long int __x86_64_data_cache_size attribute_hidden = 32 * 1024;
+/* Similar to __x86_64_data_cache_size_half, but not rounded. */
+long int __x86_64_raw_data_cache_size_half attribute_hidden = 32 * 1024 / 2;
+/* Similar to __x86_64_data_cache_size, but not rounded. */
+long int __x86_64_raw_data_cache_size attribute_hidden = 32 * 1024;
/* Shared cache size for use in memory and string routines, typically
- L2 or L3 size. */
+ L2 or L3 size, rounded to multiple of 256 bytes. */
long int __x86_64_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
long int __x86_64_shared_cache_size attribute_hidden = 1024 * 1024;
+/* Similar to __x86_64_shared_cache_size_half, but not rounded. */
+long int __x86_64_raw_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
+/* Similar to __x86_64_shared_cache_size, but not rounded. */
+long int __x86_64_raw_shared_cache_size attribute_hidden = 1024 * 1024;
#ifndef DISABLE_PREFETCHW
/* PREFETCHW support flag for use in memory and string routines. */
@@ -661,12 +669,20 @@ init_cacheinfo (void)
if (data > 0)
{
+ __x86_64_raw_data_cache_size_half = data / 2;
+ __x86_64_raw_data_cache_size = data;
+ /* Round data cache size to multiple of 256 bytes. */
+ data = data & ~255L;
__x86_64_data_cache_size_half = data / 2;
__x86_64_data_cache_size = data;
}
if (shared > 0)
{
+ __x86_64_raw_shared_cache_size_half = shared / 2;
+ __x86_64_raw_shared_cache_size = shared;
+ /* Round shared cache size to multiple of 256 bytes. */
+ shared = shared & ~255L;
__x86_64_shared_cache_size_half = shared / 2;
__x86_64_shared_cache_size = shared;
}
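
The cacheinfo change keeps two copies of each detected size: the raw value from CPUID, and the value handed to the string/memory routines, which is now rounded down to a multiple of 256 bytes. Schematically (hypothetical helper, not the glibc code):

/* Sketch of the rounding added to init_cacheinfo: "x & ~255L" clears the
   low eight bits, i.e. rounds down to a multiple of 256.  */
static void set_cache_size (long detected, long *raw, long *raw_half,
                            long *rounded, long *rounded_half)
{
  *raw = detected;
  *raw_half = detected / 2;
  detected &= ~255L;
  *rounded = detected;
  *rounded_half = detected / 2;
}
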
diff --git a/libc/sysdeps/x86_64/multiarch/strchr.S b/libc/sysdeps/x86_64/multiarch/strchr.S
index 27eead985..71845a35f 100644
--- a/libc/sysdeps/x86_64/multiarch/strchr.S
+++ b/libc/sysdeps/x86_64/multiarch/strchr.S
@@ -1,5 +1,5 @@
/* strchr with SSE4.2
- Copyright (C) 2009 Free Software Foundation, Inc.
+ Copyright (C) 2009, 2010 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -87,13 +87,13 @@ __strchr_sse42:
pxor %xmm2, %xmm2
movd %esi, %xmm1
movl %edi, %ecx
+ pshufb %xmm2, %xmm1
andl $15, %ecx
movq %rdi, %r8
je L(aligned_start)
/* Handle unaligned string. */
andq $-16, %r8
- pshufb %xmm2, %xmm1
movdqa (%r8), %xmm0
pcmpeqb %xmm0, %xmm2
pcmpeqb %xmm1, %xmm0
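
The strchr.S hunk hoists the pshufb above the alignment branch, so both the aligned and unaligned entry paths see the search character already broadcast into every byte of %xmm1. Since %xmm2 was just zeroed, pshufb with an all-zero control simply replicates byte 0; in SSE intrinsics terms that step is roughly (illustrative, not the glibc code):

#include <emmintrin.h>
#include <tmmintrin.h>

/* Broadcast the low byte of 'c' to all 16 lanes: movd plus pshufb with a
   zeroed shuffle control, as in the hoisted instruction pair above.  */
static __m128i broadcast_byte (int c)
{
  __m128i v = _mm_cvtsi32_si128 (c);                  /* movd %esi, %xmm1    */
  return _mm_shuffle_epi8 (v, _mm_setzero_si128 ());  /* pshufb %xmm2, %xmm1 */
}
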