/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <inttypes.h>  // For the PRId64/PRIx64 format macros used below.
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around of UINT32_MAX for large p_vaddr values.)

  Also, phdr0_load_address must start at a page boundary, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

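// A worked example (illustrative, assuming 4096-byte pages), using the two
// segments from the note above: PAGE_START(phdr0->p_vaddr) == 0x30000, so if
// the first segment's pages end up at 0xa0030000:
//
//   ElfW(Addr) load_bias = 0xa0030000 - PAGE_START(0x30000);  // 0xa0000000
//   ElfW(Addr) seg1_addr = load_bias + 0x40000;               // 0xa0040000
//
// i.e. every p_vaddr in the file translates to load_bias + p_vaddr in memory.
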
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

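// For example (illustrative): a segment with p_flags == (PF_R | PF_X) yields
//
//   PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC)
//
// which can be passed directly to mmap() or mprotect().
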
ElfReader::ElfReader(const char* name, int fd, off64_t file_offset, off64_t file_size)
    : name_(name), fd_(fd), file_offset_(file_offset), file_size_(file_size), phdr_num_(0),
      phdr_table_(nullptr), load_start_(nullptr), load_size_(0), load_bias_(0),
      loaded_phdr_(nullptr) {
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace(extinfo) &&
         LoadSegments() &&
         FindPhdr();
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, phdr_num_ * sizeof(ElfW(Phdr)))) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

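// Usage sketch (illustrative): this is essentially what ReserveAddressSpace()
// below does when no android_dlextinfo reservation is supplied.
//
//   ElfW(Addr) min_vaddr;
//   size_t size = phdr_table_get_load_size(phdr_table, phdr_count, &min_vaddr);
//   if (size != 0) {
//     void* start = mmap(nullptr, size, PROT_NONE,
//                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//     // load_bias is then start - min_vaddr.
//   }
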
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;
  // Assume position independent executable by default.
  uint8_t* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if ((extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_);
      return false;
    }
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    start = mmap(mmap_hint, load_size_, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_);
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_, file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_, i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              PFLAGS_TO_PROT(phdr->p_flags),
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}

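// Illustrative numbers for the zero-fill logic above (assuming 4096-byte
// pages and a hypothetical writable segment): with seg_start == 0xa0031000,
// p_filesz == 0x100 and p_memsz == 0x3000, seg_file_end starts at 0xa0031100,
// the memset() clears up to PAGE_END(seg_file_end) == 0xa0032000, and the
// anonymous map supplies the remaining zero pages up to seg_page_end ==
// 0xa0034000.
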
/* Used internally to set the protection bits of all loaded segments,
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // Make sure we're never simultaneously writable / executable.
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

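// Typical call sequence during relocation (illustrative), following the
// contracts documented above:
//
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   // ... apply relocations ...
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
//   phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);
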
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and mark every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages, using the file offset of this relro segment
      // plus the offset of the matching run within it.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}

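// Cross-process sharing sketch (illustrative): process A serializes its
// relocated relro pages, and process B, having loaded the same library at
// the same address, replaces its identical dirty pages with shared ones.
//
//   // In process A, after relocation:
//   phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias, fd);
//   // In process B, after relocating the same library:
//   phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias, fd);
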
#if defined(__arm__)

# ifndef PT_ARM_EXIDX
#  define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
# endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment", name_, reinterpret_cast<void*>(loaded));
  return false;
}