l4/pkg/uclibc/lib/contrib/uclibc/ldso/ldso/dl-elf.c.orig
1 /* vi: set sw=4 ts=4: */
2 /*
3  * This file contains the helper routines to load an ELF shared
4  * library into memory and add the symbol table info to the chain.
5  *
6  * Copyright (C) 2000-2006 by Erik Andersen <andersen@codepoet.org>
7  * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
8  *                              David Engel, Hongjiu Lu and Mitch D'Souza
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. The name of the above contributors may not be
16  *    used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32
33 #include "ldso.h"
34
35 #ifdef __LDSO_CACHE_SUPPORT__
36
37 static caddr_t _dl_cache_addr = NULL;
38 static size_t _dl_cache_size = 0;
39
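/* Map the ld.so.cache file read-only and sanity-check its header, entry
 * table and string offsets.  On any failure _dl_cache_addr is left as
 * MAP_FAILED so subsequent calls fail fast instead of retrying. */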
40 int _dl_map_cache(void)
41 {
42         int fd;
43         struct stat st;
44         header_t *header;
45         libentry_t *libent;
46         int i, strtabsize;
47
48         if (_dl_cache_addr == MAP_FAILED)
49                 return -1;
50         else if (_dl_cache_addr != NULL)
51                 return 0;
52
53         if (_dl_stat(LDSO_CACHE, &st)
54             || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) {
55                 _dl_cache_addr = MAP_FAILED;    /* so we won't try again */
56                 return -1;
57         }
58
59         _dl_cache_size = st.st_size;
60         _dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ, LDSO_CACHE_MMAP_FLAGS, fd, 0);
61         _dl_close(fd);
62         if (_dl_mmap_check_error(_dl_cache_addr)) {
63                 _dl_dprintf(2, "%s:%i: can't map '%s'\n",
64                                 _dl_progname, __LINE__, LDSO_CACHE);
65                 return -1;
66         }
67
68         header = (header_t *) _dl_cache_addr;
69
70         if (_dl_cache_size < sizeof(header_t) ||
71                         _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
72                         || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
73                         || _dl_cache_size <
74                         (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
75                         || _dl_cache_addr[_dl_cache_size - 1] != '\0')
76         {
77                 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,
78                                 LDSO_CACHE);
79                 goto fail;
80         }
81
82         strtabsize = _dl_cache_size - sizeof(header_t) -
83                 header->nlibs * sizeof(libentry_t);
84         libent = (libentry_t *) & header[1];
85
86         for (i = 0; i < header->nlibs; i++) {
87                 if (libent[i].sooffset >= strtabsize ||
88                                 libent[i].liboffset >= strtabsize)
89                 {
90                         _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);
91                         goto fail;
92                 }
93         }
94
95         return 0;
96
97 fail:
98         _dl_munmap(_dl_cache_addr, _dl_cache_size);
99         _dl_cache_addr = MAP_FAILED;
100         return -1;
101 }
102
103 int _dl_unmap_cache(void)
104 {
105         if (_dl_cache_addr == NULL || _dl_cache_addr == MAP_FAILED)
106                 return -1;
107
108 #if 1
109         _dl_munmap(_dl_cache_addr, _dl_cache_size);
110         _dl_cache_addr = NULL;
111 #endif
112
113         return 0;
114 }
115 #endif
116
117
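/* Once relocation is finished, re-protect the PT_GNU_RELRO region of 'l'
 * as read-only.  The range is page-aligned first; a failing mprotect()
 * is treated as fatal. */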
118 void
119 _dl_protect_relro (struct elf_resolve *l)
120 {
121         ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
122         ElfW(Addr) start = (base & PAGE_ALIGN);
123         ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN);
124         _dl_if_debug_dprint("RELRO protecting %s:  start:%x, end:%x\n", l->libname, start, end);
125         if (start != end &&
126             _dl_mprotect ((void *) start, end - start, PROT_READ) < 0) {
127                 _dl_dprintf(2, "%s: cannot apply additional memory protection after relocation", l->libname);
128                 _dl_exit(0);
129         }
130 }
131
132 /* This function's behavior must exactly match that
133  * in uClibc/ldso/util/ldd.c */
134 static struct elf_resolve *
135 search_for_named_library(const char *name, int secure, const char *path_list,
136         struct dyn_elf **rpnt)
137 {
138         char *path, *path_n, *mylibname;
139         struct elf_resolve *tpnt;
140         int done;
141
142         if (path_list==NULL)
143                 return NULL;
144
145         /* We need a writable copy of this string, but we don't
146          * need this allocated permanently since we don't want
147          * to leak memory, so use alloca to put path on the stack */
148         done = _dl_strlen(path_list);
149         path = alloca(done + 1);
150
151         /* another bit of local storage */
152         mylibname = alloca(2050);
153
154         _dl_memcpy(path, path_list, done+1);
155
156         /* Unlike ldd.c, don't bother to eliminate double //s */
157
158         /* Replace colons with zeros in path_list */
159         /* : at the beginning or end of path maps to CWD */
160         /* :: anywhere maps to CWD */
161         /* "" maps to CWD */
162         done = 0;
163         path_n = path;
164         do {
165                 if (*path == 0) {
166                         *path = ':';
167                         done = 1;
168                 }
169                 if (*path == ':') {
170                         *path = 0;
171                         if (*path_n)
172                                 _dl_strcpy(mylibname, path_n);
173                         else
174                                 _dl_strcpy(mylibname, "."); /* Assume current dir if empty path */
175                         _dl_strcat(mylibname, "/");
176                         _dl_strcat(mylibname, name);
177                         if ((tpnt = _dl_load_elf_shared_library(secure, rpnt, mylibname)) != NULL)
178                                 return tpnt;
179                         path_n = path+1;
180                 }
181                 path++;
182         } while (!done);
183         return NULL;
184 }
185
186 /* Used to return error codes back to dlopen et al.  */
187 unsigned long _dl_error_number;
188 unsigned long _dl_internal_error_number;
189
190 struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt,
191         struct elf_resolve *tpnt, char *full_libname, int __attribute__((unused)) trace_loaded_objects)
192 {       (void)tpnt;     /* only read when __LDSO_RUNPATH__ is defined */
193         char *pnt;
194         struct elf_resolve *tpnt1;
195         char *libname;
196
197         _dl_internal_error_number = 0;
198         libname = full_libname;
199
200         /* quick hack to ensure mylibname buffer doesn't overflow.  don't
201            allow full_libname or any directory to be longer than 1024. */
202         if (_dl_strlen(full_libname) > 1024)
203                 goto goof;
204
205         /* Skip over any initial './' and '/' stuff to
206          * get the short form libname with no path garbage */
207         pnt = _dl_strrchr(libname, '/');
208         if (pnt) {
209                 libname = pnt + 1;
210         }
211
212         _dl_if_debug_dprint("\tfind library='%s'; searching\n", libname);
213         /* If the filename has any '/', try it straight and leave it at that.
214            For IBCS2 compatibility under linux, we substitute the string
215            /usr/i486-sysv4/lib for /usr/lib in library names. */
216
217         if (libname != full_libname) {
218                 _dl_if_debug_dprint("\ttrying file='%s'\n", full_libname);
219                 tpnt1 = _dl_load_elf_shared_library(secure, rpnt, full_libname);
220                 if (tpnt1) {
221                         return tpnt1;
222                 }
223         }
224
225         /*
226          * The ABI specifies that RPATH is searched before LD_LIBRARY_PATH or
227          * the default path of /usr/lib.  Check in rpath directories.
228          */
229 #ifdef __LDSO_RUNPATH__
230         pnt = (tpnt ? (char *) tpnt->dynamic_info[DT_RPATH] : NULL);
231         if (pnt) {
232                 pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
233                 _dl_if_debug_dprint("\tsearching RPATH='%s'\n", pnt);
234                 if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL)
235                         return tpnt1;
236         }
237 #endif
238
239         /* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
240         if (_dl_library_path) {
241                 _dl_if_debug_dprint("\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
242                 if ((tpnt1 = search_for_named_library(libname, secure, _dl_library_path, rpnt)) != NULL)
243                 {
244                         return tpnt1;
245                 }
246         }
247
248         /*
249          * The ABI specifies that RUNPATH is searched after LD_LIBRARY_PATH.
250          */
251 #ifdef __LDSO_RUNPATH__
252         pnt = (tpnt ? (char *)tpnt->dynamic_info[DT_RUNPATH] : NULL);
253         if (pnt) {
254                 pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
255                 _dl_if_debug_dprint("\tsearching RUNPATH='%s'\n", pnt);
256                 if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL)
257                         return tpnt1;
258         }
259 #endif
260
261         /*
262          * Where should the cache be searched?  There is no such concept in the
263          * ABI, so we have some flexibility here.  For now, search it before
264          * the hard coded paths that follow (i.e before /lib and /usr/lib).
265          */
266 #ifdef __LDSO_CACHE_SUPPORT__
267         if (_dl_cache_addr != NULL && _dl_cache_addr != MAP_FAILED) {
268                 int i;
269                 header_t *header = (header_t *) _dl_cache_addr;
270                 libentry_t *libent = (libentry_t *) & header[1];
271                 char *strs = (char *) &libent[header->nlibs];
272
273                 _dl_if_debug_dprint("\tsearching cache='%s'\n", LDSO_CACHE);
274                 for (i = 0; i < header->nlibs; i++) {
275                         if ((libent[i].flags == LIB_ELF
276                              || libent[i].flags == LIB_ELF_LIBC0
277                              || libent[i].flags == LIB_ELF_LIBC5)
278                          && _dl_strcmp(libname, strs + libent[i].sooffset) == 0
279                          && (tpnt1 = _dl_load_elf_shared_library(secure, rpnt, strs + libent[i].liboffset))
280                         ) {
281                                 return tpnt1;
282                         }
283                 }
284         }
285 #endif
286
287         /* Look for libraries wherever the shared library loader
288          * was installed */
289         _dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath);
290         tpnt1 = search_for_named_library(libname, secure, _dl_ldsopath, rpnt);
291         if (tpnt1 != NULL)
292                 return tpnt1;
293
294         /* Lastly, search the standard list of paths for the library.
295            This list must exactly match the list in uClibc/ldso/util/ldd.c */
296         _dl_if_debug_dprint("\tsearching full lib path list\n");
297         tpnt1 = search_for_named_library(libname, secure,
298                                         UCLIBC_RUNTIME_PREFIX "lib:"
299                                         UCLIBC_RUNTIME_PREFIX "usr/lib"
300 #ifndef __LDSO_CACHE_SUPPORT__
301                                         ":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib"
302 #endif
303                                         , rpnt);
304         if (tpnt1 != NULL)
305                 return tpnt1;
306
307 goof:
308         /* Well, we shot our wad on that one.  All we can do now is punt */
309         if (_dl_internal_error_number)
310                 _dl_error_number = _dl_internal_error_number;
311         else
312                 _dl_error_number = LD_ERROR_NOFILE;
313         _dl_if_debug_dprint("Bummer: could not find '%s'!\n", libname);
314         return NULL;
315 }
316
317
318 /*
319  * Read one ELF library into memory, mmap it into the correct locations and
320  * add the symbol info to the symbol chain.  Perform any relocations that
321  * are required.
322  */
323
324 struct elf_resolve *_dl_load_elf_shared_library(int secure,
325         struct dyn_elf **rpnt, char *libname)
326 {
327         ElfW(Ehdr) *epnt;
328         unsigned long dynamic_addr = 0;
329         ElfW(Dyn) *dpnt;
330         struct elf_resolve *tpnt;
331         ElfW(Phdr) *ppnt;
332 #if defined(USE_TLS) && USE_TLS
333         ElfW(Phdr) *tlsppnt = NULL;
334 #endif
335         char *status, *header;
336         unsigned long dynamic_info[DYNAMIC_SIZE];
337         unsigned long *lpnt;
338         unsigned long libaddr;
339         unsigned long minvma = 0xffffffff, maxvma = 0;
340         unsigned int rtld_flags;
341         int i, flags, piclib, infile;
342         ElfW(Addr) relro_addr = 0;
343         size_t relro_size = 0;
344         struct stat st;
345         uint32_t *p32;
346         DL_LOADADDR_TYPE lib_loadaddr;
347         DL_INIT_LOADADDR_EXTRA_DECLS
348
349         libaddr = 0;
350         infile = _dl_open(libname, O_RDONLY, 0);
351         if (infile < 0) {
352                 _dl_internal_error_number = LD_ERROR_NOFILE;
353                 return NULL;
354         }
355
356         if (_dl_fstat(infile, &st) < 0) {
357                 _dl_internal_error_number = LD_ERROR_NOFILE;
358                 _dl_close(infile);
359                 return NULL;
360         }
361         /* If we are in secure mode (i.e. a setuid/setgid binary using LD_PRELOAD),
362            we don't load the library if it isn't setuid. */
363         if (secure) {
364                 if (!(st.st_mode & S_ISUID)) {
365                         _dl_close(infile);
366                         return NULL;
367                 }
368         }
369
370         /* Check if file is already loaded */
371         for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
372 #ifndef __NOT_FOR_L4__
373                 if (_dl_cap_equal(tpnt->st_dev, st.st_dev)) {
374 #else
375                 if (tpnt->st_dev == st.st_dev && tpnt->st_ino == st.st_ino) {
376 #endif
377                         /* Already loaded */
378                         tpnt->usage_count++;
379                         _dl_close(infile);
380                         return tpnt;
381                 }
382         }
383         header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
384                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZE, -1, 0);
385         if (_dl_mmap_check_error(header)) {
386                 _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
387                 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
388                 _dl_close(infile);
389                 return NULL;
390         }
391
392         _dl_read(infile, header, _dl_pagesize);
393         epnt = (ElfW(Ehdr) *) (intptr_t) header;
394         p32 = (uint32_t*)&epnt->e_ident;
395         if (*p32 != ELFMAG_U32) {
396                 _dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
397                                 libname);
398                 _dl_internal_error_number = LD_ERROR_NOTELF;
399                 _dl_close(infile);
400                 _dl_munmap(header, _dl_pagesize);
401                 return NULL;
402         }
403
404         if ((epnt->e_type != ET_DYN) || (epnt->e_machine != MAGIC1
405 #ifdef MAGIC2
406                                 && epnt->e_machine != MAGIC2
407 #endif
408                                 ))
409         {
410                 _dl_internal_error_number =
411                         (epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
412                 _dl_dprintf(2, "%s: '%s' is not an ELF executable for " ELF_TARGET
413                                 "\n", _dl_progname, libname);
414                 _dl_close(infile);
415                 _dl_munmap(header, _dl_pagesize);
416                 return NULL;
417         }
418
419         ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
420
421         piclib = 1;
422         for (i = 0; i < epnt->e_phnum; i++) {
423
424                 if (ppnt->p_type == PT_DYNAMIC) {
425                         if (dynamic_addr)
426                                 _dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
427                                                 _dl_progname, libname);
428                         dynamic_addr = ppnt->p_vaddr;
429                 }
430
431                 if (ppnt->p_type == PT_LOAD) {
432                         /* See if this is a PIC library. */
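                        /* Heuristic: if the very first program header is a
                           PT_LOAD linked at a high virtual address, treat the
                           object as non-PIC and map it at that fixed address. */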
433                         if (i == 0 && ppnt->p_vaddr > 0x1000000) {
434                                 piclib = 0;
435                                 minvma = ppnt->p_vaddr;
436                         }
437                         if (piclib && ppnt->p_vaddr < minvma) {
438                                 minvma = ppnt->p_vaddr;
439                         }
440                         if (((unsigned long) ppnt->p_vaddr + ppnt->p_memsz) > maxvma) {
441                                 maxvma = ppnt->p_vaddr + ppnt->p_memsz;
442                         }
443                 }
444                 if (ppnt->p_type == PT_TLS) {
445 #if defined(USE_TLS) && USE_TLS
446                         if (ppnt->p_memsz == 0)
447                                 /* Nothing to do for an empty segment.  */
448                                 continue;
449                         else
450                                 /* Save for after 'tpnt' is actually allocated. */
451                                 tlsppnt = ppnt;
452 #else
453                         /*
454                          * Yup, the user was an idiot and tried to sneak in a library with
455                          * TLS in it and we don't support it. Let's fall on our own sword
456                          * and scream at the luser while we die.
457                          */
458                         _dl_dprintf(2, "%s: '%s' library contains unsupported TLS\n",
459                                 _dl_progname, libname);
460                         _dl_internal_error_number = LD_ERROR_TLS_FAILED;
461                         _dl_close(infile);
462                         _dl_munmap(header, _dl_pagesize);
463                         return NULL;
464 #endif
465                 }
466                 ppnt++;
467         }
468
469         DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
470
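        /* Round the highest segment end up to a page boundary and the lowest
           load address down to a 64K boundary; the difference is the size of
           the single region reserved below. */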
471         maxvma = (maxvma + ADDR_ALIGN) & PAGE_ALIGN;
472         minvma = minvma & ~0xffffU;
473
474         flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
475         if (!piclib)
476                 flags |= MAP_FIXED;
477
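        /* Reserve one contiguous, initially PROT_NONE region covering all load
           segments; the individual segments are then mapped over it with
           MAP_FIXED.  The L4 variant passes an extra flag (0x1000000),
           presumably interpreted by the L4 mmap backend. */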
478         if (piclib == 0 || piclib == 1) {
479 #ifdef __NOT_FOR_L4__
480                 status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
481                                 maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
482 #else
483                 status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
484                                 maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS | 0x1000000, -1, 0);
485 #endif
486                 if (_dl_mmap_check_error(status)) {
487                         _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
488                         _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
489                         _dl_close(infile);
490                         _dl_munmap(header, _dl_pagesize);
491                         return NULL;
492                 }
493                 libaddr = (unsigned long) status;
494                 flags |= MAP_FIXED;
495         }
496
497         /* Get the memory to store the library */
498         ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
499
500         DL_INIT_LOADADDR(lib_loadaddr, libaddr, ppnt, epnt->e_phnum);
501
502         for (i = 0; i < epnt->e_phnum; i++) {
503                 if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
504                         char *addr;
505
506                         addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
507                         if (addr == NULL)
508                                 goto cant_map;
509
510                         DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
511                         ppnt++;
512                         continue;
513                 }
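                /* Remember the PT_GNU_RELRO range so _dl_protect_relro() can
                   make it read-only again after relocation. */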
514                 if (ppnt->p_type == PT_GNU_RELRO) {
515                         relro_addr = ppnt->p_vaddr;
516                         relro_size = ppnt->p_memsz;
517                 }
518                 if (ppnt->p_type == PT_LOAD) {
519                         char *tryaddr;
520                         ssize_t size;
521
522                         /* See if this is a PIC library. */
523                         if (i == 0 && ppnt->p_vaddr > 0x1000000) {
524                                 piclib = 0;
525                                 /* flags |= MAP_FIXED; */
526                         }
527
528                         if (ppnt->p_flags & PF_W) {
529                                 unsigned long map_size;
530                                 char *cpnt;
531                                 char *piclib2map = 0;
532
533                                 if (piclib == 2 &&
534                                     /* We might be able to avoid this
535                                        call if memsz doesn't require
536                                        an additional page, but this
537                                        would require mmap to always
538                                        return page-aligned addresses
539                                        and a whole number of pages
540                                        allocated.  Unfortunately mmap on
541                                        uClinux may return misaligned
542                                        addresses and may allocate
543                                        partial pages, so we may end up
544                                        doing unnecessary mmap calls.
545
546                                        This is what we could do if we
547                                        knew mmap would always return
548                                        aligned pages:
549
550                                     ((ppnt->p_vaddr + ppnt->p_filesz
551                                       + ADDR_ALIGN)
552                                      & PAGE_ALIGN)
553                                     < ppnt->p_vaddr + ppnt->p_memsz)
554
555                                        Instead, we have to do this:  */
556                                     ppnt->p_filesz < ppnt->p_memsz)
557                                   {
558                                     piclib2map = (char *)
559                                       _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
560                                                + ppnt->p_memsz,
561                                                LXFLAGS(ppnt->p_flags),
562                                                flags | MAP_ANONYMOUS, -1, 0);
563                                     if (_dl_mmap_check_error(piclib2map))
564                                       goto cant_map;
565                                     DL_INIT_LOADADDR_HDR
566                                       (lib_loadaddr, piclib2map
567                                        + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
568                                   }
569
570                                 tryaddr = piclib == 2 ? piclib2map
571                                   : ((char*) (piclib ? libaddr : 0) +
572                                      (ppnt->p_vaddr & PAGE_ALIGN));
573
574                                 size = (ppnt->p_vaddr & ADDR_ALIGN)
575                                   + ppnt->p_filesz;
576
577                                 /* For !MMU, mmap to a fixed address will fail.
578                                    So instead of desperately calling mmap and failing,
579                                    we set status to MAP_FAILED to save a call
580                                    to mmap ().  */
581 #ifndef __ARCH_USE_MMU__
582                                 if (piclib2map == 0)
583 #endif
584                                   status = (char *) _dl_mmap
585                                     (tryaddr, size, LXFLAGS(ppnt->p_flags),
586                                      flags | (piclib2map ? MAP_FIXED : 0),
587                                      infile, ppnt->p_offset & OFFS_ALIGN);
588 #ifndef __ARCH_USE_MMU__
589                                 else
590                                   status = MAP_FAILED;
591 #endif
592 #ifdef _DL_PREAD
593                                 if (_dl_mmap_check_error(status) && piclib2map
594                                     && (_DL_PREAD (infile, tryaddr, size,
595                                                    ppnt->p_offset & OFFS_ALIGN)
596                                         == size))
597                                   status = tryaddr;
598 #endif
599                                 if (_dl_mmap_check_error(status)
600                                     || (tryaddr && tryaddr != status)) {
601                                 cant_map:
602                                         _dl_dprintf(2, "%s:%i: can't map '%s'\n",
603                                                         _dl_progname, __LINE__, libname);
604                                         _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
605                                         DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
606                                         _dl_close(infile);
607                                         _dl_munmap(header, _dl_pagesize);
608                                         return NULL;
609                                 }
610
611                                 if (! piclib2map) {
612                                   DL_INIT_LOADADDR_HDR
613                                     (lib_loadaddr, status
614                                      + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
615                                 }
616                                 /* Now we want to allocate and
617                                    zero-out any data from the end of
618                                    the region we mapped in from the
619                                    file (filesz) to the end of the
620                                    loadable segment (memsz).  We may
621                                    need additional pages for memsz,
622                                    that we map in below, and we can
623                                    count on the kernel to zero them
624                                    out, but we have to zero out stuff
625                                    in the last page that we mapped in
626                                    from the file.  However, we can't
627                                    assume to have actually obtained
628                                    full pages from the kernel, since
629                                    we didn't ask for them, and uClibc
630                                    may not give us full pages for
631                                    small allocations.  So only zero
632                                    out up to memsz or the end of the
633                                    page, whichever comes first.  */
634
635                                 /* CPNT is the beginning of the memsz
636                                    portion not backed by filesz.  */
637                                 cpnt = (char *) (status + size);
638
639                                 /* MAP_SIZE is the address of the
640                                    beginning of the next page.  */
641                                 map_size = (ppnt->p_vaddr + ppnt->p_filesz
642                                             + ADDR_ALIGN) & PAGE_ALIGN;
643
644 #ifndef MIN
645 # define MIN(a,b) ((a) < (b) ? (a) : (b))
646 #endif
647                                 _dl_memset (cpnt, 0,
648                                             MIN (map_size
649                                                  - (ppnt->p_vaddr
650                                                     + ppnt->p_filesz),
651                                                  ppnt->p_memsz
652                                                  - ppnt->p_filesz));
653
654                                 if (map_size < ppnt->p_vaddr + ppnt->p_memsz
655                                     && !piclib2map) {
656                                         tryaddr = map_size + (char*)(piclib ? libaddr : 0);
657                                         status = (char *) _dl_mmap(tryaddr,
658                                                 ppnt->p_vaddr + ppnt->p_memsz - map_size,
659                                                 LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
660                                         if (_dl_mmap_check_error(status)
661                                             || tryaddr != status)
662                                                 goto cant_map;
663                                 }
664                         } else {
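                                /* Non-writable segment: map it directly from
                                   the file with the access bits taken from the
                                   program header. */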
665                                 tryaddr = (piclib == 2 ? 0
666                                            : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
667                                            + (piclib ? libaddr : 0));
668                                 size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
669                                 status = (char *) _dl_mmap
670                                            (tryaddr, size, LXFLAGS(ppnt->p_flags),
671                                             flags | (piclib == 2 ? MAP_EXECUTABLE
672                                                      | MAP_DENYWRITE : 0),
673                                             infile, ppnt->p_offset & OFFS_ALIGN);
674                                 if (_dl_mmap_check_error(status)
675                                     || (tryaddr && tryaddr != status))
676                                   goto cant_map;
677                                 DL_INIT_LOADADDR_HDR
678                                   (lib_loadaddr, status
679                                    + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
680                         }
681
682                         /* if (libaddr == 0 && piclib) {
683                            libaddr = (unsigned long) status;
684                            flags |= MAP_FIXED;
685                            } */
686                 }
687                 ppnt++;
688         }
689         _dl_close(infile);
690
691         /* For a non-PIC library, the addresses are all absolute */
692         if (piclib) {
693                 dynamic_addr = (unsigned long) DL_RELOC_ADDR(lib_loadaddr, dynamic_addr);
694         }
695
696         /*
697          * OK, the ELF library is now loaded into VM in the correct locations
698          * The next step is to go through and do the dynamic linking (if needed).
699          */
700
701         /* Start by scanning the dynamic section to get all of the pointers */
702
703         if (!dynamic_addr) {
704                 _dl_internal_error_number = LD_ERROR_NODYNAMIC;
705                 _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
706                                 _dl_progname, libname);
707                 _dl_munmap(header, _dl_pagesize);
708                 return NULL;
709         }
710
711         dpnt = (ElfW(Dyn) *) dynamic_addr;
712         _dl_memset(dynamic_info, 0, sizeof(dynamic_info));
713         rtld_flags = _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
714         /* If the TEXTREL is set, this means that we need to make the pages
715            writable before we perform relocations.  Do this now. They get set
716            back again later. */
717
718         if (dynamic_info[DT_TEXTREL]) {
719 #ifndef __FORCE_SHAREABLE_TEXT_SEGMENTS__
720                 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
721                 for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
722                         if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) {
723                                 _dl_mprotect((void *) ((piclib ? libaddr : 0) +
724                                                         (ppnt->p_vaddr & PAGE_ALIGN)),
725                                                 (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
726                                                 PROT_READ | PROT_WRITE | PROT_EXEC);
727                         }
728                 }
729 #else
730                 _dl_dprintf(_dl_debug_file, "Can't modify %s's text section."
731                         " Use GCC option -fPIC for shared objects, please.\n",
732                         libname);
733                 _dl_exit(1);
734 #endif
735         }
736
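        /* Create the elf_resolve entry for this object and record what the
           rest of the linker needs: the RELRO range, the device/inode identity
           used by the "already loaded" check, the program headers and the
           relocation flags from the dynamic section. */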
737         tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
738                         dynamic_addr, 0);
739         tpnt->relro_addr = relro_addr;
740         tpnt->relro_size = relro_size;
741         tpnt->st_dev = st.st_dev;
742         tpnt->st_ino = st.st_ino;
743         tpnt->ppnt = (ElfW(Phdr) *) DL_RELOC_ADDR(tpnt->loadaddr, epnt->e_phoff);
744         tpnt->n_phent = epnt->e_phnum;
745         tpnt->rtld_flags |= rtld_flags;
746
747 #if defined(USE_TLS) && USE_TLS
748         if (tlsppnt) {
749                 _dl_debug_early("Found TLS header for %s\n", libname);
750 # if NO_TLS_OFFSET != 0
751                 tpnt->l_tls_offset = NO_TLS_OFFSET;
752 # endif
753                 tpnt->l_tls_blocksize = tlsppnt->p_memsz;
754                 tpnt->l_tls_align = tlsppnt->p_align;
755                 if (tlsppnt->p_align == 0)
756                         tpnt->l_tls_firstbyte_offset = 0;
757                 else
758                         tpnt->l_tls_firstbyte_offset = tlsppnt->p_vaddr &
759                                 (tlsppnt->p_align - 1);
760                 tpnt->l_tls_initimage_size = tlsppnt->p_filesz;
761                 tpnt->l_tls_initimage = (void *) tlsppnt->p_vaddr;
762
763                 /* Assign the next available module ID.  */
764                 tpnt->l_tls_modid = _dl_next_tls_modid ();
765
766                 /* We know the load address, so add it to the offset. */
767                 if (tpnt->l_tls_initimage != NULL)
768                 {
769 # ifdef __SUPPORT_LD_DEBUG_EARLY__
770                         unsigned int tmp = (unsigned int) tpnt->l_tls_initimage;
771                         tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
772                         _dl_debug_early("Relocated TLS initial image from %x to %x (size = %x)\n", tmp, tpnt->l_tls_initimage, tpnt->l_tls_initimage_size);
773                         tmp = 0;
774 # else
775                         tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
776 # endif
777                 }
778         }
779 #endif
780
781         /*
782          * Add this object into the symbol chain
783          */
784         if (*rpnt) {
785                 (*rpnt)->next = _dl_malloc(sizeof(struct dyn_elf));
786                 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
787                 (*rpnt)->next->prev = (*rpnt);
788                 *rpnt = (*rpnt)->next;
789         }
790 #ifndef SHARED
791         /* When statically linked, the first time we dlopen a DSO
792          * the *rpnt is NULL, so we need to allocate memory for it,
793          * and initialize the _dl_symbol_table.
794          */
795         else {
796                 *rpnt = _dl_symbol_tables = _dl_malloc(sizeof(struct dyn_elf));
797                 _dl_memset(*rpnt, 0, sizeof(struct dyn_elf));
798         }
799 #endif
800         (*rpnt)->dyn = tpnt;
801         tpnt->symbol_scope = _dl_symbol_tables;
802         tpnt->usage_count++;
803         tpnt->libtype = elf_lib;
804
805         /*
806          * OK, the next thing we need to do is to insert the dynamic linker into
807          * the proper entry in the GOT so that the PLT symbols can be properly
808          * resolved.
809          */
810
811         lpnt = (unsigned long *) dynamic_info[DT_PLTGOT];
812
813         if (lpnt) {
814                 lpnt = (unsigned long *) (dynamic_info[DT_PLTGOT]);
815                 INIT_GOT(lpnt, tpnt);
816         }
817
818         _dl_if_debug_dprint("\n\tfile='%s';  generating link map\n", libname);
819         _dl_if_debug_dprint("\t\tdynamic: %x  base: %x\n", dynamic_addr, DL_LOADADDR_BASE(lib_loadaddr));
820         _dl_if_debug_dprint("\t\t  entry: %x  phdr: %x  phnum: %x\n\n",
821                         DL_RELOC_ADDR(lib_loadaddr, epnt->e_entry), tpnt->ppnt, tpnt->n_phent);
822
823         _dl_munmap(header, _dl_pagesize);
824
825         return tpnt;
826 }
827
828 /* now_flag must be RTLD_NOW or zero */
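/* Walk the dyn_elf chain depth-first and relocate each object: normal
 * relocations first (with RELATIVE entries handled as one batch), then the
 * PLT entries, either lazily or eagerly depending on now_flag and
 * DT_BIND_NOW. */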
829 int _dl_fixup(struct dyn_elf *rpnt, int now_flag)
830 {
831         int goof = 0;
832         struct elf_resolve *tpnt;
833         ElfW(Word) reloc_size, relative_count;
834         ElfW(Addr) reloc_addr;
835
836         if (rpnt->next)
837                 goof = _dl_fixup(rpnt->next, now_flag);
838         if (goof)
839                 return goof;
840         tpnt = rpnt->dyn;
841
842         if (!(tpnt->init_flag & RELOCS_DONE))
843                 _dl_if_debug_dprint("relocation processing: %s\n", tpnt->libname);
844
845         if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
846                 _dl_if_debug_dprint("%s: can't handle %s relocation records\n",
847                                 _dl_progname, UNSUPPORTED_RELOC_STR);
848                 goof++;
849                 return goof;
850         }
851
852         reloc_size = tpnt->dynamic_info[DT_RELOC_TABLE_SIZE];
853 /* On some machines, notably SPARC & PPC, DT_REL* includes DT_JMPREL in its
854    range.  Note that according to the ELF spec, this is completely legal! */
855 #ifdef ELF_MACHINE_PLTREL_OVERLAP
856         reloc_size -= tpnt->dynamic_info [DT_PLTRELSZ];
857 #endif
858         if (tpnt->dynamic_info[DT_RELOC_TABLE_ADDR] &&
859             !(tpnt->init_flag & RELOCS_DONE)) {
860                 reloc_addr = tpnt->dynamic_info[DT_RELOC_TABLE_ADDR];
861                 relative_count = tpnt->dynamic_info[DT_RELCONT_IDX];
862                 if (relative_count) { /* Optimize the XX_RELATIVE relocations if possible */
863                         reloc_size -= relative_count * sizeof(ELF_RELOC);
864                         elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count);
865                         reloc_addr += relative_count * sizeof(ELF_RELOC);
866                 }
867                 goof += _dl_parse_relocation_information(rpnt,
868                                 reloc_addr,
869                                 reloc_size);
870                 tpnt->init_flag |= RELOCS_DONE;
871         }
872         if (tpnt->dynamic_info[DT_BIND_NOW])
873                 now_flag = RTLD_NOW;
874         if (tpnt->dynamic_info[DT_JMPREL] &&
875             (!(tpnt->init_flag & JMP_RELOCS_DONE) ||
876              (now_flag && !(tpnt->rtld_flags & now_flag)))) {
877                 tpnt->rtld_flags |= now_flag;
878                 if (!(tpnt->rtld_flags & RTLD_NOW)) {
879                         _dl_parse_lazy_relocation_information(rpnt,
880                                         tpnt->dynamic_info[DT_JMPREL],
881                                         tpnt->dynamic_info [DT_PLTRELSZ]);
882                 } else {
883                         goof += _dl_parse_relocation_information(rpnt,
884                                         tpnt->dynamic_info[DT_JMPREL],
885                                         tpnt->dynamic_info[DT_PLTRELSZ]);
886                 }
887                 tpnt->init_flag |= JMP_RELOCS_DONE;
888         }
889
890 #if 0
891 /* _dl_add_to_slotinfo is called by init_tls() for initial DSO
892    or by dlopen() for dynamically loaded DSO. */
893 #if defined(USE_TLS) && USE_TLS
894         /* Add object to slot information data if necessary. */
895         if (tpnt->l_tls_blocksize != 0 && tls_init_tp_called)
896                 _dl_add_to_slotinfo ((struct link_map *) tpnt);
897 #endif
898 #endif
899         return goof;
900 }
901 #ifndef NOT_FOR_L4
902 #include <l4/sys/utcb.h>
903 #endif
904
905 /* Minimal printf which handles only %s, %d/%i, and %x/%X */
906 void _dl_dprintf(int fd, const char *fmt, ...)
907 {
908 #if __WORDSIZE > 32
909         long int num;
910 #else
911         int num;
912 #endif
913         va_list args;
914         char *start, *ptr, *string;
915         char *buf;
916
917         if (!fmt)
918                 return;
919
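        /* On L4, save the UTCB message and buffer registers and restore them
           before returning, since the write calls below may clobber them
           (console output goes through IPC). */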
920 #ifndef NOT_FOR_L4
921         l4_msg_regs_t store_mr;
922         l4_buf_regs_t store_br;
923         l4_msg_regs_t *mr = l4_utcb_mr();
924         l4_buf_regs_t *br = l4_utcb_br();
925
926         _dl_memcpy(&store_mr, mr, sizeof(store_mr));
927         _dl_memcpy(&store_br, br, sizeof(store_br));
928 #endif
929
930         buf = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
931                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
932         if (_dl_mmap_check_error(buf)) {
933                 _dl_write(fd, "mmap of a spare page failed!\n", 29);
934                 _dl_exit(20);
935         }
936
937         start = ptr = buf;
938
939         if (_dl_strlen(fmt) >= (_dl_pagesize - 1)) {
940                 _dl_write(fd, "overflow\n", 9);
941                 _dl_exit(20);
942         }
943
944         _dl_strcpy(buf, fmt);
945         va_start(args, fmt);
946
947         while (start) {
948                 while (*ptr != '%' && *ptr) {
949                         ptr++;
950                 }
951
952                 if (*ptr == '%') {
953                         *ptr++ = '\0';
954                         _dl_write(fd, start, _dl_strlen(start));
955
956                         switch (*ptr++) {
957                                 case 's':
958                                         string = va_arg(args, char *);
959
960                                         if (!string)
961                                                 _dl_write(fd, "(null)", 6);
962                                         else
963                                                 _dl_write(fd, string, _dl_strlen(string));
964                                         break;
965
966                                 case 'i':
967                                 case 'd':
968                                         {
969                                                 char tmp[22];
970 #if __WORDSIZE > 32
971                                                 num = va_arg(args, long int);
972 #else
973                                                 num = va_arg(args, int);
974 #endif
975                                                 string = _dl_simple_ltoa(tmp, num);
976                                                 _dl_write(fd, string, _dl_strlen(string));
977                                                 break;
978                                         }
979                                 case 'x':
980                                 case 'X':
981                                         {
982                                                 char tmp[22];
983 #if __WORDSIZE > 32
984                                                 num = va_arg(args, long int);
985 #else
986                                                 num = va_arg(args, int);
987 #endif
988                                                 string = _dl_simple_ltoahex(tmp, num);
989                                                 _dl_write(fd, string, _dl_strlen(string));
990                                                 break;
991                                         }
992                                 default:
993                                         _dl_write(fd, "(null)", 6);
994                                         break;
995                         }
996
997                         start = ptr;
998                 } else {
999                         _dl_write(fd, start, _dl_strlen(start));
1000                         start = NULL;
1001                 }
1002         }
1003         _dl_munmap(buf, _dl_pagesize);
1004 #ifndef NOT_FOR_L4
1005         _dl_memcpy(mr, &store_mr, sizeof(store_mr));
1006         _dl_memcpy(br, &store_br, sizeof(store_br));
1007 #endif
1008         return;
1009 }
1010
1011 char *_dl_strdup(const char *string)
1012 {
1013         char *retval;
1014         int len;
1015
1016         len = _dl_strlen(string);
1017         retval = _dl_malloc(len + 1);
1018         _dl_strcpy(retval, string);
1019         return retval;
1020 }
1021
1022 unsigned int _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[],
1023                                     void *debug_addr, DL_LOADADDR_TYPE load_off)
1024 {
1025         return __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off);
1026 }