/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then will load all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around of UINT32_MAX for large p_vaddr values.)

  Note also that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

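/* Illustrative sketch, not part of the original sources: the load_bias
 * arithmetic above, worked through with the example segment list and
 * hypothetical numbers (4096-byte pages assumed):
 *
 *   phdr0->p_vaddr     = 0x30000     (page-aligned in this example)
 *   phdr0_load_address = 0xa0030000  (chosen by the loader)
 *   load_bias          = 0xa0030000 - PAGE_START(0x30000) = 0xa0000000
 *
 * For an unaligned p_vaddr such as 0x30123, PAGE_START(0x30123) == 0x30000
 * and PAGE_OFFSET(0x30123) == 0x123, so the mapping starts on the page
 * boundary and the segment's real content starts 0x123 bytes into it.
 */
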
#define MAYBE_MAP_FLAG(x,from,to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)          (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                    MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                    MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
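
/* For instance (illustrative only): a text segment with
 * p_flags == (PF_R | PF_X) yields PROT_READ | PROT_EXEC, and a data
 * segment with (PF_R | PF_W) yields PROT_READ | PROT_WRITE.
 */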

ElfReader::ElfReader(const char* name, int fd)
    : name_(name), fd_(fd),
      phdr_num_(0), phdr_mmap_(NULL), phdr_table_(NULL), phdr_size_(0),
      load_start_(NULL), load_size_(0), load_bias_(0),
      loaded_phdr_(NULL) {
}

ElfReader::~ElfReader() {
  if (fd_ != -1) {
    close(fd_);
  }
  if (phdr_mmap_ != NULL) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load() {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace() &&
         LoadSegments() &&
         FindPhdr();
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }
  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable. Expected at least %d bytes, only found %d bytes.",
           name_, static_cast<int>(sizeof(header_)), static_cast<int>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
      header_.e_ident[EI_MAG1] != ELFMAG1 ||
      header_.e_ident[EI_MAG2] != ELFMAG2 ||
      header_.e_ident[EI_MAG3] != ELFMAG3) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  if (header_.e_ident[EI_CLASS] != ELFCLASS32) {
    DL_ERR("\"%s\" not 32-bit: %d", name_, header_.e_ident[EI_CLASS]);
    return false;
  }
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine !=
#ifdef ANDROID_ARM_LINKER
      EM_ARM
#elif defined(ANDROID_MIPS_LINKER)
      EM_MIPS
#elif defined(ANDROID_X86_LINKER)
      EM_386
#endif
  ) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, static_cast<int>(phdr_num_));
    return false;
  }

  Elf32_Addr page_min = PAGE_START(header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
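
/* Illustrative sketch with hypothetical numbers (4096-byte pages): a typical
 * shared library has header_.e_phoff == 52 and, say, phdr_num_ == 8, so
 *
 *   page_min    = PAGE_START(52)                        = 0
 *   page_max    = PAGE_END(52 + 8 * sizeof(Elf32_Phdr)) = PAGE_END(308) = 4096
 *   page_offset = PAGE_OFFSET(52)                       = 52
 *
 * i.e. a single page is mapped and phdr_table_ points 52 bytes into it.
 */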

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are non-NULL, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                size_t phdr_count,
                                Elf32_Addr* out_min_vaddr,
                                Elf32_Addr* out_max_vaddr)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    bool found_pt_load = false;
    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }
        found_pt_load = true;

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }
    if (!found_pt_load) {
        min_vaddr = 0x00000000U;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    if (out_min_vaddr != NULL) {
        *out_min_vaddr = min_vaddr;
    }
    if (out_max_vaddr != NULL) {
        *out_max_vaddr = max_vaddr;
    }
    return max_vaddr - min_vaddr;
}
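
/* Illustrative sketch: with the two example segments from the technical note
 * above (vaddr 0x30000 / memsz 0x4000 and vaddr 0x40000 / memsz 0x8000):
 *
 *   min_vaddr = PAGE_START(0x30000) = 0x30000
 *   max_vaddr = PAGE_END(0x48000)   = 0x48000
 *
 * so the function returns 0x18000 bytes, which also covers the unused gap
 * between the two segments.
 */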

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace() {
  Elf32_Addr min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    DL_ERR("couldn't reserve %d bytes of address space for \"%s\"",
           static_cast<int>(load_size_), name_);
    return false;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

// Map all loadable segments in process' address space.
// This assumes you already called ReserveAddressSpace to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end = seg_start + phdr->p_memsz;

    Elf32_Addr seg_page_start = PAGE_START(seg_start);
    Elf32_Addr seg_page_end = PAGE_END(seg_end);

    Elf32_Addr seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    Elf32_Addr file_start = phdr->p_offset;
    Elf32_Addr file_end = file_start + phdr->p_filesz;

    Elf32_Addr file_page_start = PAGE_START(file_start);
    Elf32_Addr file_length = file_end - file_page_start;

    if (file_length != 0) {
      void* seg_addr = mmap((void*)seg_page_start,
                            file_length,
                            PFLAGS_TO_PROT(phdr->p_flags),
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %d: %s",
               name_, static_cast<int>(i), strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
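
/* Illustrative sketch with hypothetical numbers (4096-byte pages): a writable
 * segment with seg_start = 0xa0041000, p_filesz = 0x1234, p_memsz = 0x5000:
 *
 *   seg_file_end (before rounding) = 0xa0042234; memset() zeroes
 *       0xa0042234..0xa0043000, the tail of the last file-backed page
 *   seg_file_end (after PAGE_END)  = 0xa0043000
 *   seg_page_end                   = PAGE_END(0xa0046000) = 0xa0046000
 *
 * Pages 0xa0043000..0xa0046000 then come from the anonymous zero map; this
 * is how .bss ends up zero-initialized.
 */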

/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (in practice, PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        load_bias,
                          int               extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                            int               phdr_count,
                            Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                              int               phdr_count,
                              Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, PROT_WRITE);
}
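
/* Illustrative usage sketch (not from the original sources): a caller in the
 * linker would typically wrap relocation processing like this:
 *
 *   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
 *   // ... apply relocations ...
 *   phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
 *   phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);
 */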

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               int               prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and mark every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                             int               phdr_count,
                             Elf32_Addr        load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ);
}
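
/* Illustrative sketch with hypothetical numbers (4096-byte pages): a
 * PT_GNU_RELRO entry with p_vaddr = 0x5000 and p_memsz = 0x1800 causes
 * mprotect(PROT_READ) on [load_bias + 0x5000, load_bias + 0x7000), i.e. the
 * PAGE_END rounding also covers the half page past the end of the segment.
 */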

#ifdef ANDROID_ARM_LINKER

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int
phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         Elf32_Addr**      arm_exidx,
                         unsigned*         arm_exidx_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

        /* Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes. */
        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
#endif /* ANDROID_ARM_LINKER */

/* Find the address and size of the ELF file's .dynamic section in memory.
 * Sets *dynamic to NULL if the section is missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void
phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               Elf32_Dyn**       dynamic,
                               size_t*           dynamic_count,
                               Elf32_Word*       dynamic_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        *dynamic = reinterpret_cast<Elf32_Dyn*>(load_bias + phdr->p_vaddr);
        if (dynamic_count) {
            /* Each Elf32_Dyn entry is 8 bytes. */
            *dynamic_count = (unsigned)(phdr->p_memsz / 8);
        }
        if (dynamic_flags) {
            *dynamic_flags = phdr->p_flags;
        }
        return;
    }
    *dynamic = NULL;
    if (dynamic_count) {
        *dynamic_count = 0;
    }
}
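
/* Illustrative usage sketch (hypothetical caller):
 *
 *   Elf32_Dyn* dynamic;
 *   size_t dynamic_count;
 *   Elf32_Word dynamic_flags;
 *   phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
 *                                  &dynamic, &dynamic_count, &dynamic_flags);
 *   if (dynamic == NULL) {
 *       // no PT_DYNAMIC: not a dynamically-linked image
 *   }
 */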

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with 'phdr_table_',
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        Elf32_Addr elf_addr = load_bias_ + phdr->p_vaddr;
        const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
        Elf32_Addr offset = ehdr->e_phoff;
        return CheckPhdr((Elf32_Addr)ehdr + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(Elf32_Addr loaded) {
  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;
  Elf32_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf32_Phdr));
  for (Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const Elf32_Phdr*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %x not in loadable segment", name_, loaded);
  return false;
}