Switch <elf.h> over to Linux UAPI under the covers.
Remove the linker's reliance on BSD cruft and use the glibc-style
ElfW macro. (Other code too, but the linker contains the majority
of the code that needs to work for Elf32 and Elf64.)
All platforms need dl_iterate_phdr_static, so it doesn't make sense
to have that part of the per-architecture configuration.
Bug: 12476126
Change-Id: I1d7f918f1303a392794a6cd8b3512ff56bd6e487
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 2058050..a133d1b 100755
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -29,9 +29,8 @@
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
-#include <linux/auxvec.h>
+#include <inttypes.h>
#include <pthread.h>
-#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -67,7 +66,7 @@
*/
static bool soinfo_link_image(soinfo* si);
-static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf);
+static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
// We can't use malloc(3) in the dynamic linker. We use a linked list of anonymous
// maps, each a single page in size. The pages are broken up into as many struct soinfo
@@ -451,33 +450,33 @@
return rv;
}
-static Elf_Sym* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
- Elf_Sym* symtab = si->symtab;
- const char* strtab = si->strtab;
+static ElfW(Sym)* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
+ ElfW(Sym)* symtab = si->symtab;
+ const char* strtab = si->strtab;
- TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
- name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);
+ TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
+ name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);
- for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
- Elf_Sym* s = symtab + n;
- if (strcmp(strtab + s->st_name, name)) continue;
+ for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
+ ElfW(Sym)* s = symtab + n;
+ if (strcmp(strtab + s->st_name, name)) continue;
- /* only concern ourselves with global and weak symbol definitions */
- switch (ELF_ST_BIND(s->st_info)) {
- case STB_GLOBAL:
- case STB_WEAK:
- if (s->st_shndx == SHN_UNDEF) {
- continue;
- }
+ /* only concern ourselves with global and weak symbol definitions */
+ switch (ELF_ST_BIND(s->st_info)) {
+ case STB_GLOBAL:
+ case STB_WEAK:
+ if (s->st_shndx == SHN_UNDEF) {
+ continue;
+ }
- TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
- name, si->name, reinterpret_cast<void*>(s->st_value),
- static_cast<size_t>(s->st_size));
- return s;
- }
+ TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
+ name, si->name, reinterpret_cast<void*>(s->st_value),
+ static_cast<size_t>(s->st_size));
+ return s;
}
+ }
- return NULL;
+ return NULL;
}
static unsigned elfhash(const char* _name) {
@@ -493,9 +492,9 @@
return h;
}
-static Elf_Sym* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
+static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
unsigned elf_hash = elfhash(name);
- Elf_Sym* s = NULL;
+ ElfW(Sym)* s = NULL;
if (si != NULL && somain != NULL) {
@@ -603,7 +602,7 @@
Binary Interface) where in Chapter 5 it discuss resolving "Shared
Object Dependencies" in breadth first search order.
*/
-Elf_Sym* dlsym_handle_lookup(soinfo* si, const char* name) {
+ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name) {
return soinfo_elf_lookup(si, elfhash(name), name);
}
@@ -612,14 +611,14 @@
beginning of the global solist. Otherwise the search starts at the
specified soinfo (for RTLD_NEXT).
*/
-Elf_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
+ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
unsigned elf_hash = elfhash(name);
if (start == NULL) {
start = solist;
}
- Elf_Sym* s = NULL;
+ ElfW(Sym)* s = NULL;
for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) {
s = soinfo_elf_lookup(si, elf_hash, name);
if (s != NULL) {
@@ -637,7 +636,7 @@
}
soinfo* find_containing_library(const void* p) {
- Elf_Addr address = reinterpret_cast<Elf_Addr>(p);
+ ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
for (soinfo* si = solist; si != NULL; si = si->next) {
if (address >= si->base && address - si->base < si->size) {
return si;
@@ -646,13 +645,13 @@
return NULL;
}
-Elf_Sym* dladdr_find_symbol(soinfo* si, const void* addr) {
- Elf_Addr soaddr = reinterpret_cast<Elf_Addr>(addr) - si->base;
+ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr) {
+ ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - si->base;
// Search the library's symbol table for any defined symbol which
// contains this address.
for (size_t i = 0; i < si->nchain; ++i) {
- Elf_Sym* sym = &si->symtab[i];
+ ElfW(Sym)* sym = &si->symtab[i];
if (sym->st_shndx != SHN_UNDEF &&
soaddr >= sym->st_value &&
soaddr < sym->st_value + sym->st_size) {
@@ -666,13 +665,13 @@
#if 0
static void dump(soinfo* si)
{
- Elf_Sym* s = si->symtab;
- for (unsigned n = 0; n < si->nchain; n++) {
- TRACE("%04d> %08x: %02x %04x %08x %08x %s", n, s,
- s->st_info, s->st_shndx, s->st_value, s->st_size,
- si->strtab + s->st_name);
- s++;
- }
+ ElfW(Sym)* s = si->symtab;
+ for (unsigned n = 0; n < si->nchain; n++) {
+ TRACE("%04d> %08x: %02x %04x %08x %08x %s", n, s,
+ s->st_info, s->st_shndx, s->st_value, s->st_size,
+ si->strtab + s->st_name);
+ s++;
+ }
}
#endif
@@ -808,7 +807,7 @@
TRACE("unloading '%s'", si->name);
si->CallDestructors();
- for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+ for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = si->strtab + d->d_un.d_val;
TRACE("%s needs to unload %s", si->name, library_name);
@@ -859,18 +858,18 @@
}
#if defined(USE_RELA)
-static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo* needed[]) {
- Elf_Sym* symtab = si->symtab;
+static int soinfo_relocate_a(soinfo* si, ElfW(Rela)* rela, unsigned count, soinfo* needed[]) {
+ ElfW(Sym)* symtab = si->symtab;
const char* strtab = si->strtab;
- Elf_Sym* s;
- Elf_Rela* start = rela;
+ ElfW(Sym)* s;
+ ElfW(Rela)* start = rela;
soinfo* lsi;
for (size_t idx = 0; idx < count; ++idx, ++rela) {
- unsigned type = ELF_R_TYPE(rela->r_info);
- unsigned sym = ELF_R_SYM(rela->r_info);
- Elf_Addr reloc = static_cast<Elf_Addr>(rela->r_offset + si->load_bias);
- Elf_Addr sym_addr = 0;
+ unsigned type = ELFW(R_TYPE)(rela->r_info);
+ unsigned sym = ELFW(R_SYM)(rela->r_info);
+ ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + si->load_bias);
+ ElfW(Addr) sym_addr = 0;
char* sym_name = NULL;
DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
@@ -931,7 +930,7 @@
}
} else {
// We got a definition.
- sym_addr = static_cast<Elf_Addr>(s->st_value + lsi->load_bias);
+ sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
}
count_relocation(kRelocSymbol);
} else {
@@ -943,117 +942,92 @@
case R_AARCH64_JUMP_SLOT:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO JMP_SLOT %16lx <- %16lx %s\n",
- reloc,
- (sym_addr + rela->r_addend),
- sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend);
+ TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
+ reloc, (sym_addr + rela->r_addend), sym_name);
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
break;
case R_AARCH64_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO GLOB_DAT %16lx <- %16lx %s\n",
- reloc,
- (sym_addr + rela->r_addend),
- sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend);
+ TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
+ reloc, (sym_addr + rela->r_addend), sym_name);
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
break;
case R_AARCH64_ABS64:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO ABS64 %16lx <- %16lx %s\n",
- reloc,
- (sym_addr + rela->r_addend),
- sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+ TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
+ reloc, (sym_addr + rela->r_addend), sym_name);
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
break;
case R_AARCH64_ABS32:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO ABS32 %16lx <- %16lx %s\n",
- reloc,
- (sym_addr + rela->r_addend),
- sym_name);
- if ((static_cast<Elf_Addr>(INT32_MIN) <=
- (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
- ((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
- static_cast<Elf_Addr>(UINT32_MAX))) {
- *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+ TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
+ reloc, (sym_addr + rela->r_addend), sym_name);
+ if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
+ ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
} else {
- DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
- (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
- static_cast<Elf_Addr>(INT32_MIN),
- static_cast<Elf_Addr>(UINT32_MAX));
+ DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
+ (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
+ static_cast<ElfW(Addr)>(INT32_MIN),
+ static_cast<ElfW(Addr)>(UINT32_MAX));
return -1;
}
break;
case R_AARCH64_ABS16:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO ABS16 %16lx <- %16lx %s\n",
- reloc,
- (sym_addr + rela->r_addend),
- sym_name);
- if ((static_cast<Elf_Addr>(INT16_MIN) <=
- (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
- ((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
- static_cast<Elf_Addr>(UINT16_MAX))) {
- *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+ TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
+ reloc, (sym_addr + rela->r_addend), sym_name);
+ if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
+ ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
} else {
- DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
- (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
- static_cast<Elf_Addr>(INT16_MIN),
- static_cast<Elf_Addr>(UINT16_MAX));
+ DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
+ (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
+ static_cast<ElfW(Addr)>(INT16_MIN),
+ static_cast<ElfW(Addr)>(UINT16_MAX));
return -1;
}
break;
case R_AARCH64_PREL64:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO REL64 %16lx <- %16lx - %16lx %s\n",
- reloc,
- (sym_addr + rela->r_addend),
- rela->r_offset,
- sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
+ TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
+ reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
break;
case R_AARCH64_PREL32:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO REL32 %16lx <- %16lx - %16lx %s\n",
- reloc,
- (sym_addr + rela->r_addend),
- rela->r_offset, sym_name);
- if ((static_cast<Elf_Addr>(INT32_MIN) <=
- (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
- ((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
- static_cast<Elf_Addr>(UINT32_MAX))) {
- *reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
+ TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
+ reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
+ if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
+ ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
} else {
- DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
- (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
- static_cast<Elf_Addr>(INT32_MIN),
- static_cast<Elf_Addr>(UINT32_MAX));
+ DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
+ (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
+ static_cast<ElfW(Addr)>(INT32_MIN),
+ static_cast<ElfW(Addr)>(UINT32_MAX));
return -1;
}
break;
case R_AARCH64_PREL16:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO REL16 %16lx <- %16lx - %16lx %s\n",
- reloc,
- (sym_addr + rela->r_addend),
- rela->r_offset, sym_name);
- if ((static_cast<Elf_Addr>(INT16_MIN) <=
- (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
- ((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
- static_cast<Elf_Addr>(UINT16_MAX))) {
- *reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
+ TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
+ reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
+ if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
+ ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
} else {
- DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
- (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
- static_cast<Elf_Addr>(INT16_MIN),
- static_cast<Elf_Addr>(UINT16_MAX));
+ DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
+ (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
+ static_cast<ElfW(Addr)>(INT16_MIN),
+ static_cast<ElfW(Addr)>(UINT16_MAX));
return -1;
}
break;
@@ -1065,10 +1039,9 @@
DL_ERR("odd RELATIVE form...");
return -1;
}
- TRACE_TYPE(RELO, "RELO RELATIVE %16lx <- %16lx\n",
- reloc,
- (si->base + rela->r_addend));
- *reinterpret_cast<Elf_Addr*>(reloc) = (si->base + rela->r_addend);
+ TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
+ reloc, (si->base + rela->r_addend));
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = (si->base + rela->r_addend);
break;
case R_AARCH64_COPY:
@@ -1089,13 +1062,13 @@
}
count_relocation(kRelocCopy);
MARK(rela->r_offset);
- TRACE_TYPE(RELO, "RELO COPY %16lx <- %ld @ %16lx %s\n",
- reloc,
- s->st_size,
- (sym_addr + rela->r_addend),
- sym_name);
+ TRACE_TYPE(RELO, "RELO COPY %16llx <- %lld @ %16llx %s\n",
+ reloc,
+ s->st_size,
+ (sym_addr + rela->r_addend),
+ sym_name);
if (reloc == (sym_addr + rela->r_addend)) {
- Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
+ ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
if (src == NULL) {
DL_ERR("%s R_AARCH64_COPY relocation source cannot be resolved", si->name);
@@ -1103,12 +1076,12 @@
}
if (lsi->has_DT_SYMBOLIC) {
DL_ERR("%s invalid R_AARCH64_COPY relocation against DT_SYMBOLIC shared "
- "library %s (built with -Bsymbolic?)", si->name, lsi->name);
+ "library %s (built with -Bsymbolic?)", si->name, lsi->name);
return -1;
}
if (s->st_size < src->st_size) {
- DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%ld < %ld)",
- si->name, s->st_size, src->st_size);
+ DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%lld < %lld)",
+ si->name, s->st_size, src->st_size);
return -1;
}
memcpy((void*)reloc, (void*)(src->st_value + lsi->load_bias), src->st_size);
@@ -1118,16 +1091,12 @@
}
break;
case R_AARCH64_TLS_TPREL64:
- TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16lx <- %16lx - %16lx\n",
- reloc,
- (sym_addr + rela->r_addend),
- rela->r_offset);
+ TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
+ reloc, (sym_addr + rela->r_addend), rela->r_offset);
break;
case R_AARCH64_TLS_DTPREL32:
- TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16lx <- %16lx - %16lx\n",
- reloc,
- (sym_addr + rela->r_addend),
- rela->r_offset);
+ TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
+ reloc, (sym_addr + rela->r_addend), rela->r_offset);
break;
#elif defined(__x86_64__)
case R_X86_64_JUMP_SLOT:
@@ -1135,14 +1104,14 @@
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
break;
case R_X86_64_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
break;
case R_X86_64_RELATIVE:
count_relocation(kRelocRelative);
@@ -1153,21 +1122,21 @@
}
TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
static_cast<size_t>(si->base));
- *reinterpret_cast<Elf_Addr*>(reloc) = si->base + rela->r_addend;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = si->base + rela->r_addend;
break;
case R_X86_64_32:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
static_cast<size_t>(sym_addr), sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
break;
case R_X86_64_64:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
static_cast<size_t>(sym_addr), sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
break;
case R_X86_64_PC32:
count_relocation(kRelocRelative);
@@ -1175,7 +1144,7 @@
TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend - reloc;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
break;
#endif
@@ -1187,21 +1156,19 @@
return 0;
}
#else
-static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
- soinfo* needed[])
-{
- Elf_Sym* symtab = si->symtab;
+static int soinfo_relocate(soinfo* si, ElfW(Rel)* rel, unsigned count, soinfo* needed[]) {
+ ElfW(Sym)* symtab = si->symtab;
const char* strtab = si->strtab;
- Elf_Sym* s;
- Elf_Rel* start = rel;
+ ElfW(Sym)* s;
+ ElfW(Rel)* start = rel;
soinfo* lsi;
for (size_t idx = 0; idx < count; ++idx, ++rel) {
- unsigned type = ELF_R_TYPE(rel->r_info);
- // TODO: don't use unsigned for 'sym'. Use uint32_t or Elf_Addr instead.
- unsigned sym = ELF_R_SYM(rel->r_info);
- Elf_Addr reloc = static_cast<Elf_Addr>(rel->r_offset + si->load_bias);
- Elf_Addr sym_addr = 0;
+ unsigned type = ELFW(R_TYPE)(rel->r_info);
+ // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
+ unsigned sym = ELFW(R_SYM)(rel->r_info);
+ ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + si->load_bias);
+ ElfW(Addr) sym_addr = 0;
char* sym_name = NULL;
DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
@@ -1266,7 +1233,7 @@
}
} else {
// We got a definition.
- sym_addr = static_cast<Elf_Addr>(s->st_value + lsi->load_bias);
+ sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
}
count_relocation(kRelocSymbol);
} else {
@@ -1279,26 +1246,26 @@
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
break;
case R_ARM_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
break;
case R_ARM_ABS32:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
break;
case R_ARM_REL32:
count_relocation(kRelocRelative);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
reloc, sym_addr, rel->r_offset, sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr - rel->r_offset;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
break;
case R_ARM_COPY:
if ((si->flags & FLAG_EXE) == 0) {
@@ -1320,7 +1287,7 @@
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO %08x <- %d @ %08x %s", reloc, s->st_size, sym_addr, sym_name);
if (reloc == sym_addr) {
- Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
+ ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
if (src == NULL) {
DL_ERR("%s R_ARM_COPY relocation source cannot be resolved", si->name);
@@ -1347,26 +1314,26 @@
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
break;
case R_386_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
break;
case R_386_32:
count_relocation(kRelocRelative);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
break;
case R_386_PC32:
count_relocation(kRelocRelative);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
- *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr - reloc);
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
break;
#elif defined(__mips__)
case R_MIPS_REL32:
@@ -1375,9 +1342,9 @@
TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x %s",
reloc, sym_addr, (sym_name) ? sym_name : "*SECTIONHDR*");
if (s) {
- *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
} else {
- *reinterpret_cast<Elf_Addr*>(reloc) += si->base;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
}
break;
#endif
@@ -1395,7 +1362,7 @@
}
TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(si->base));
- *reinterpret_cast<Elf_Addr*>(reloc) += si->base;
+ *reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
break;
default:
@@ -1416,7 +1383,7 @@
unsigned local_gotno = si->mips_local_gotno;
unsigned gotsym = si->mips_gotsym;
unsigned symtabno = si->mips_symtabno;
- Elf_Sym* symtab = si->symtab;
+ ElfW(Sym)* symtab = si->symtab;
/*
* got[0] is address of lazy resolver function
@@ -1441,11 +1408,11 @@
}
/* Now for the global GOT entries */
- Elf_Sym* sym = symtab + gotsym;
+ ElfW(Sym)* sym = symtab + gotsym;
got = si->plt_got + local_gotno;
for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
const char* sym_name;
- Elf_Sym* s;
+ ElfW(Sym)* s;
soinfo* lsi;
/* This is an undefined reference... try to locate it */
@@ -1536,7 +1503,7 @@
}
if (dynamic != NULL) {
- for (Elf_Dyn* d = dynamic; d->d_tag != DT_NULL; ++d) {
+ for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = strtab + d->d_un.d_val;
TRACE("\"%s\": calling constructors in DT_NEEDED \"%s\"", name, library_name);
@@ -1625,8 +1592,8 @@
static bool soinfo_link_image(soinfo* si) {
/* "base" might wrap around UINT32_MAX. */
- Elf_Addr base = si->load_bias;
- const Elf_Phdr *phdr = si->phdr;
+ ElfW(Addr) base = si->load_bias;
+ const ElfW(Phdr)* phdr = si->phdr;
int phnum = si->phnum;
bool relocating_linker = (si->flags & FLAG_LINKER) != 0;
@@ -1638,7 +1605,7 @@
/* Extract dynamic section */
size_t dynamic_count;
- Elf_Word dynamic_flags;
+ ElfW(Word) dynamic_flags;
phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic,
&dynamic_count, &dynamic_flags);
if (si->dynamic == NULL) {
@@ -1659,7 +1626,7 @@
// Extract useful information from dynamic section.
uint32_t needed_count = 0;
- for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+ for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
switch (d->d_tag) {
@@ -1673,7 +1640,7 @@
si->strtab = (const char *) (base + d->d_un.d_ptr);
break;
case DT_SYMTAB:
- si->symtab = (Elf_Sym *) (base + d->d_un.d_ptr);
+ si->symtab = (ElfW(Sym)*) (base + d->d_un.d_ptr);
break;
#if !defined(__LP64__)
case DT_PLTREL:
@@ -1685,16 +1652,16 @@
#endif
case DT_JMPREL:
#if defined(USE_RELA)
- si->plt_rela = (Elf_Rela*) (base + d->d_un.d_ptr);
+ si->plt_rela = (ElfW(Rela)*) (base + d->d_un.d_ptr);
#else
- si->plt_rel = (Elf_Rel*) (base + d->d_un.d_ptr);
+ si->plt_rel = (ElfW(Rel)*) (base + d->d_un.d_ptr);
#endif
break;
case DT_PLTRELSZ:
#if defined(USE_RELA)
- si->plt_rela_count = d->d_un.d_val / sizeof(Elf_Rela);
+ si->plt_rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
#else
- si->plt_rel_count = d->d_un.d_val / sizeof(Elf_Rel);
+ si->plt_rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
#endif
break;
#if !defined(__LP64__)
@@ -1712,10 +1679,10 @@
break;
#if defined(USE_RELA)
case DT_RELA:
- si->rela = (Elf_Rela*) (base + d->d_un.d_ptr);
+ si->rela = (ElfW(Rela)*) (base + d->d_un.d_ptr);
break;
case DT_RELASZ:
- si->rela_count = d->d_un.d_val / sizeof(Elf_Rela);
+ si->rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
break;
case DT_REL:
DL_ERR("unsupported DT_REL in \"%s\"", si->name);
@@ -1725,10 +1692,10 @@
return false;
#else
case DT_REL:
- si->rel = (Elf_Rel*) (base + d->d_un.d_ptr);
+ si->rel = (ElfW(Rel)*) (base + d->d_un.d_ptr);
break;
case DT_RELSZ:
- si->rel_count = d->d_un.d_val / sizeof(Elf_Rel);
+ si->rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
break;
case DT_RELA:
DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
@@ -1747,21 +1714,21 @@
DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array);
break;
case DT_INIT_ARRAYSZ:
- si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
+ si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
break;
case DT_FINI_ARRAY:
si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array);
break;
case DT_FINI_ARRAYSZ:
- si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
+ si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
break;
case DT_PREINIT_ARRAY:
si->preinit_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array);
break;
case DT_PREINIT_ARRAYSZ:
- si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
+ si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
break;
case DT_TEXTREL:
#if defined(__LP64__)
@@ -1868,7 +1835,7 @@
soinfo** needed = (soinfo**) alloca((1 + needed_count) * sizeof(soinfo*));
soinfo** pneeded = needed;
- for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+ for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = si->strtab + d->d_un.d_val;
DEBUG("%s needs %s", si->name, library_name);
@@ -1964,21 +1931,21 @@
*/
static void add_vdso(KernelArgumentBlock& args UNUSED) {
#if defined(AT_SYSINFO_EHDR)
- Elf_Ehdr* ehdr_vdso = reinterpret_cast<Elf_Ehdr*>(args.getauxval(AT_SYSINFO_EHDR));
- if (ehdr_vdso == NULL) {
- return;
- }
+ ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
+ if (ehdr_vdso == NULL) {
+ return;
+ }
- soinfo* si = soinfo_alloc("[vdso]");
+ soinfo* si = soinfo_alloc("[vdso]");
- si->phdr = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
- si->phnum = ehdr_vdso->e_phnum;
- si->base = reinterpret_cast<Elf_Addr>(ehdr_vdso);
- si->size = phdr_table_get_load_size(si->phdr, si->phnum);
- si->flags = 0;
- si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
+ si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
+ si->phnum = ehdr_vdso->e_phnum;
+ si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
+ si->size = phdr_table_get_load_size(si->phdr, si->phnum);
+ si->flags = 0;
+ si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
- soinfo_link_image(si);
+ soinfo_link_image(si);
#endif
}
@@ -1987,7 +1954,7 @@
* fixed it's own GOT. It is safe to make references to externs
* and other non-local data at this point.
*/
-static Elf_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf_Addr linker_base) {
+static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
/* NOTE: we store the args pointer on a special location
* of the temporary TLS area in order to pass it to
* the C Library's runtime initializer.
@@ -2071,15 +2038,15 @@
* warning: .dynamic section for "/system/bin/linker" is not at the
* expected address (wrong library or version mismatch?)
*/
- Elf_Ehdr *elf_hdr = (Elf_Ehdr *) linker_base;
- Elf_Phdr *phdr = (Elf_Phdr*)((unsigned char*) linker_base + elf_hdr->e_phoff);
+ ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
+ ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>((unsigned char*) linker_base + elf_hdr->e_phoff);
phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
&linker_soinfo.dynamic, NULL, NULL);
insert_soinfo_into_debug_map(&linker_soinfo);
}
// Extract information passed from the kernel.
- si->phdr = reinterpret_cast<Elf_Phdr*>(args.getauxval(AT_PHDR));
+ si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
si->phnum = args.getauxval(AT_PHNUM);
si->entry = args.getauxval(AT_ENTRY);
@@ -2092,8 +2059,8 @@
si->load_bias = 0;
for (size_t i = 0; i < si->phnum; ++i) {
if (si->phdr[i].p_type == PT_PHDR) {
- si->load_bias = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_vaddr;
- si->base = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_offset;
+ si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
+ si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
break;
}
}
@@ -2183,14 +2150,14 @@
* load bias, i.e. add the value of any p_vaddr in the file to get
* the corresponding address in memory.
*/
-static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf) {
- Elf_Addr offset = elf->e_phoff;
- const Elf_Phdr* phdr_table = (const Elf_Phdr*)((char*)elf + offset);
- const Elf_Phdr* phdr_end = phdr_table + elf->e_phnum;
+static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
+ ElfW(Addr) offset = elf->e_phoff;
+ const ElfW(Phdr)* phdr_table = reinterpret_cast<const ElfW(Phdr)*>((char*)elf + offset);
+ const ElfW(Phdr)* phdr_end = phdr_table + elf->e_phnum;
- for (const Elf_Phdr* phdr = phdr_table; phdr < phdr_end; phdr++) {
+ for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_end; phdr++) {
if (phdr->p_type == PT_LOAD) {
- return reinterpret_cast<Elf_Addr>(elf) + phdr->p_offset - phdr->p_vaddr;
+ return reinterpret_cast<ElfW(Addr)>(elf) + phdr->p_offset - phdr->p_vaddr;
}
}
return 0;
@@ -2205,12 +2172,12 @@
* relocations, any attempt to reference an extern variable, extern
* function, or other GOT reference will generate a segfault.
*/
-extern "C" Elf_Addr __linker_init(void* raw_args) {
+extern "C" ElfW(Addr) __linker_init(void* raw_args) {
KernelArgumentBlock args(raw_args);
- Elf_Addr linker_addr = args.getauxval(AT_BASE);
- Elf_Ehdr* elf_hdr = reinterpret_cast<Elf_Ehdr*>(linker_addr);
- Elf_Phdr* phdr = (Elf_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);
+ ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
+ ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
+ ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>((unsigned char*) linker_addr + elf_hdr->e_phoff);
soinfo linker_so;
memset(&linker_so, 0, sizeof(soinfo));
@@ -2239,7 +2206,7 @@
// We have successfully fixed our own relocations. It's safe to run
// the main part of the linker now.
args.abort_message_ptr = &gAbortMessage;
- Elf_Addr start_address = __linker_init_post_relocation(args, linker_addr);
+ ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);
set_soinfo_pool_protection(PROT_READ);