/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <machine/exec.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset -> segment file offset
    p_filesz -> segment file size
    p_memsz  -> segment memory size (always >= p_filesz)
    p_vaddr  -> segment's virtual address
    p_flags  -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead, the
  loader decides where to load the first segment, then loads all others
  relative to the first one to preserve the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the file at base address 0xa0000000,
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as an unsigned 32-bit integer, to deal with
  possible wrap-around at UINT32_MAX for large p_vaddr values.)

  Note also that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
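
/* Worked example, using the segments above: the first segment's p_vaddr is
   0x30000 and it is mapped at 0xa0030000, so

       load_bias = 0xa0030000 - PAGE_START(0x30000) = 0xa0000000

   and the second segment's content must then appear at
   0x40000 + 0xa0000000 = 0xa0040000, matching the layout shown. */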

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

ElfReader::ElfReader(const char* name, int fd)
    : name_(name), fd_(fd),
      phdr_num_(0), phdr_mmap_(nullptr), phdr_table_(nullptr), phdr_size_(0),
      load_start_(nullptr), load_size_(0), load_bias_(0),
      loaded_phdr_(nullptr) {
}

ElfReader::~ElfReader() {
  if (phdr_mmap_ != nullptr) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace(extinfo) &&
         LoadSegments() &&
         FindPhdr();
}
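
// A sketch of how the linker drives this class (the real call site lives in
// linker.cpp; the soinfo field and accessor names below are assumptions based
// on linker.h and linker_phdr.h of the same era):
//
//   ElfReader elf_reader(name, fd);
//   if (!elf_reader.Load(extinfo)) {
//     return nullptr;
//   }
//   si->base = elf_reader.load_start();
//   si->size = elf_reader.load_size();
//   si->load_bias = elf_reader.load_bias();
//   si->phdr = elf_reader.loaded_phdr();
//   si->phnum = elf_reader.phdr_count();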

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }
  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine != ELF_TARG_MACH) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
    return false;
  }

  ElfW(Addr) page_min = PAGE_START(header_.e_phoff);
  ElfW(Addr) page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ElfW(Phdr))));
  ElfW(Addr) page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(nullptr, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
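
// For illustration, assuming 4096-byte pages: an ELF32 file with e_phoff == 52
// and e_phnum == 8 (32-byte entries) gives page_min = 0,
// page_max = PAGE_END(52 + 256) = 4096 and page_offset = 52, so a single page
// is mapped and phdr_table_ points 52 bytes into it.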

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
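
// With the two example segments from the technical note (0x30000...0x34000 and
// 0x40000...0x48000), min_vaddr ends up as 0x30000, max_vaddr as 0x48000, and
// the function returns 0x18000 bytes. The hole between the segments is counted
// too, since the whole extent must be reserved to preserve the relative layout.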

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_);
      return false;
    }
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_);
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}
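
// Note the bias arithmetic at the end: min_vaddr has already been rounded down
// by PAGE_START in phdr_table_get_load_size, so if min_vaddr is 0x30000 and the
// kernel happens to place the reservation at 0xb6f00000 (an illustrative
// address, not a guaranteed placement), load_bias_ becomes
// 0xb6f00000 - 0x30000 = 0xb6ed0000, exactly the load_bias described in the
// technical note.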

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_length != 0) {
      void* seg_addr = mmap(reinterpret_cast<void*>(seg_page_start),
                            file_length,
                            PFLAGS_TO_PROT(phdr->p_flags),
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
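
// Taking the technical note's second segment (filesz:0x2000, memsz:0x8000,
// vaddr:0x40000) as an example, and assuming 4096-byte pages: the file content
// ends at bias + 0x42000, which is already page-aligned, so no partial-page
// memset is needed, and the pages from bias + 0x42000 up to bias + 0x48000
// come entirely from the private anonymous zero map.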

/* Used internally by phdr_table_protect_segments and
 * phdr_table_unprotect_segments below to set the protection bits of all
 * loaded segments, with optional extra flags (i.e. really PROT_WRITE).
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
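
// Putting the two doc comments above together, the intended calling sequence
// around relocation is:
//
//   phdr_table_unprotect_segments(phdr, count, bias);  // make segments writable
//   ... apply all relocations ...
//   phdr_table_protect_segments(phdr, count, bias);    // restore protections
//   phdr_table_protect_gnu_relro(phdr, count, bias);   // then lock down relro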

/* Used internally by phdr_table_protect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's point of view, but contain absolute
 * addresses that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}
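
// A process that has loaded the same library at the same address can later
// hand this file to phdr_table_map_gnu_relro() (below) to replace its own
// dirty, relocated relro pages with clean, shareable file-backed ones.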

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages. The file offset must include the base offset
      // of this relro segment within the file, not just the segment-relative
      // match offset.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, unsigned* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
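    // Each .ARM.exidx entry is two 32-bit words (a PREL31 function offset and
    // an unwind datum), hence the division by 8 below.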
    *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 *   dynamic_flags -> protection flags for section (unset on failure).
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias,
                                    ElfW(Dyn)** dynamic, size_t* dynamic_count, ElfW(Word)* dynamic_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_DYNAMIC) {
      continue;
    }

    *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr->p_vaddr);
    if (dynamic_count) {
      *dynamic_count = phdr->p_memsz / sizeof(ElfW(Dyn));
    }
    if (dynamic_flags) {
      *dynamic_flags = phdr->p_flags;
    }
    return;
  }
  *dynamic = nullptr;
  if (dynamic_count) {
    *dynamic_count = 0;
  }
}

// Returns the address of the program header table as it appears in the loaded
// segments in memory. This is in contrast with 'phdr_table_' which
// is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment", name_, reinterpret_cast<void*>(loaded));
  return false;
}