/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <string.h>   /* for memset() in phdr_table_load_segments() */
#include <sys/mman.h>

#include "linker_phdr.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all the
  others relative to the first one to preserve the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: the computation must be performed as a 32-bit unsigned integer, to
  deal with wrap-around at UINT32_MAX for large p_vaddr values.)

  Also, phdr0_load_address must start at a page boundary, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

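  As a worked example, using the two segments above: the first segment's
  p_vaddr is 0x30000 and it is mapped at phdr0_load_address 0xa0030000, so

       load_bias = 0xa0030000 - PAGE_START(0x30000) = 0xa0000000

  and the second segment's p_vaddr of 0x40000 then corresponds to the
  memory address 0xa0000000 + 0x40000 = 0xa0040000.
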
 **/

#define MAYBE_MAP_FLAG(x,from,to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)          (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                    MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                    MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

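/* For example, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to
 * (PROT_READ | PROT_EXEC), i.e. a read-only, executable mapping. */
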
/* Load the program header table from an ELF file into a read-only private
 * anonymous mmap-ed block.
 *
 * Input:
 *   fd           -> file descriptor
 *   phdr_offset  -> file offset of phdr table
 *   phdr_num     -> number of entries in the table.
 *
 * Output:
 *   phdr_mmap    -> address of mmap block in memory.
 *   phdr_size    -> size of mmap block in memory.
 *   phdr_table   -> address of first entry in memory.
 *
 * Return:
 *   -1 on error, or 0 on success.
 */
int phdr_table_load(int                fd,
                    Elf32_Addr         phdr_offset,
                    Elf32_Half         phdr_num,
                    void**             phdr_mmap,
                    Elf32_Addr*        phdr_size,
                    const Elf32_Phdr** phdr_table)
{
    Elf32_Addr  page_min, page_max, page_offset;
    void*       mmap_result;

    /* Just like the kernel, we only accept program header tables that
     * are smaller than 64KB. */
    if (phdr_num < 1 || phdr_num > 65536/sizeof(Elf32_Phdr)) {
        errno = EINVAL;
        return -1;
    }

    page_min = PAGE_START(phdr_offset);
    page_max = PAGE_END(phdr_offset + phdr_num*sizeof(Elf32_Phdr));
    page_offset = PAGE_OFFSET(phdr_offset);

    mmap_result = mmap(NULL,
                       page_max - page_min,
                       PROT_READ,
                       MAP_PRIVATE,
                       fd,
                       page_min);

    if (mmap_result == MAP_FAILED) {
        return -1;
    }

    *phdr_mmap = mmap_result;
    *phdr_size = page_max - page_min;
    *phdr_table = (Elf32_Phdr*)((char*)mmap_result + page_offset);

    return 0;
}

void phdr_table_unload(void* phdr_mmap, Elf32_Addr phdr_memsize)
{
    munmap(phdr_mmap, phdr_memsize);
}

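/* Illustrative usage (a sketch, not part of the original file): callers are
 * expected to pair phdr_table_load() with phdr_table_unload(). Here 'ehdr'
 * stands for an Elf32_Ehdr previously read from the same file:
 *
 *     void* phdr_mmap;
 *     Elf32_Addr phdr_size;
 *     const Elf32_Phdr* phdr_table;
 *     if (phdr_table_load(fd, ehdr.e_phoff, ehdr.e_phnum,
 *                         &phdr_mmap, &phdr_size, &phdr_table) < 0) {
 *         return -1;   // errno is set by phdr_table_load()
 *     }
 *     // ... use phdr_table ...
 *     phdr_table_unload(phdr_mmap, phdr_size);
 */
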
/* Compute the extent of all loadable segments in an ELF program header
 * table. This corresponds to the page-aligned size in bytes that needs to be
 * reserved in the process' address space.
 *
 * This returns 0 if there are no loadable segments.
 */
Elf32_Addr phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                    size_t phdr_count)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }

    if (min_vaddr > max_vaddr) {
        return 0;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    return max_vaddr - min_vaddr;
}

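/* For the two-segment example in the technical note above (and assuming
 * 4096-byte pages), this returns
 * PAGE_END(0x48000) - PAGE_START(0x30000) = 0x18000 bytes. */
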
/* Reserve a virtual address range big enough to hold all loadable
 * segments of a program header table. This is done by creating a
 * private anonymous mmap() with PROT_NONE.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 * Output:
 *   load_start -> first page of reserved address space range
 *   load_size  -> size in bytes of reserved address space range
 *   load_bias  -> load bias, as described in technical note above.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_reserve_memory(const Elf32_Phdr* phdr_table,
                          size_t phdr_count,
                          void** load_start,
                          Elf32_Addr* load_size,
                          Elf32_Addr* load_bias)
{
    Elf32_Addr size = phdr_table_get_load_size(phdr_table, phdr_count);
    if (size == 0) {
        errno = EINVAL;
        return -1;
    }

    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    void* start = mmap(NULL, size, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
        return -1;
    }

    *load_start = start;
    *load_size  = size;
    *load_bias  = 0;

    /* The load bias is the difference between the reservation's start
     * and the page holding the first loadable segment's p_vaddr. */
    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];
        if (phdr->p_type == PT_LOAD) {
            *load_bias = (Elf32_Addr)start - PAGE_START(phdr->p_vaddr);
            break;
        }
    }
    return 0;
}

/* Map all loadable segments in the process' address space.
 * This assumes you already called phdr_table_reserve_memory to
 * reserve the address space range for the library.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load offset.
 *   fd         -> input file descriptor.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_load_segments(const Elf32_Phdr* phdr_table,
                         int phdr_count,
                         Elf32_Addr load_bias,
                         int fd)
{
    int nn;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        void* seg_addr;

        if (phdr->p_type != PT_LOAD)
            continue;

        /* Segment addresses in memory */
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

        Elf32_Addr seg_page_start = PAGE_START(seg_start);
        Elf32_Addr seg_page_end   = PAGE_END(seg_end);

        Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

        /* File offsets */
        Elf32_Addr file_start = phdr->p_offset;
        Elf32_Addr file_end   = file_start + phdr->p_filesz;

        Elf32_Addr file_page_start = PAGE_START(file_start);

        seg_addr = mmap((void*)seg_page_start,
                        file_end - file_page_start,
                        PFLAGS_TO_PROT(phdr->p_flags),
                        MAP_FIXED|MAP_PRIVATE,
                        fd,
                        file_page_start);

        if (seg_addr == MAP_FAILED) {
            return -1;
        }

        /* if the segment is writable, and does not end on a page boundary,
         * zero-fill it until the page limit. */
        if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
            memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
        }

        seg_file_end = PAGE_END(seg_file_end);

        /* seg_file_end is now the first page address after the file
         * content. If seg_end is larger, we need to zero anything
         * between them. This is done by using a private anonymous
         * map for all extra pages.
         */
        if (seg_page_end > seg_file_end) {
            void* zeromap = mmap((void*)seg_file_end,
                                 seg_page_end - seg_file_end,
                                 PFLAGS_TO_PROT(phdr->p_flags),
                                 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                                 -1,
                                 0);
            if (zeromap == MAP_FAILED) {
                return -1;
            }
        }
    }
    return 0;
}

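/* Illustrative loading sequence (a sketch, not part of the original file),
 * combining the reservation and mapping steps above:
 *
 *     void* load_start;
 *     Elf32_Addr load_size, load_bias;
 *     if (phdr_table_reserve_memory(phdr_table, phdr_count,
 *                                   &load_start, &load_size, &load_bias) < 0 ||
 *         phdr_table_load_segments(phdr_table, phdr_count, load_bias, fd) < 0) {
 *         return -1;   // errno is set
 *     }
 */
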
/* Used internally to set the protection bits of all loaded segments,
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int phdr_count,
                          Elf32_Addr load_bias,
                          int extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                            int phdr_count,
                            Elf32_Addr load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                              int phdr_count,
                              Elf32_Addr load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, PROT_WRITE);
}

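/* Illustrative relocation flow (a sketch, not part of the original file):
 *
 *     phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
 *     // ... apply relocations to the now-writable segments ...
 *     phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
 */
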
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int phdr_count,
                               Elf32_Addr load_bias,
                               int prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and put every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

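/* For example (hypothetical values, assuming 4096-byte pages): a
 * PT_GNU_RELRO entry with p_vaddr 0x1234 and p_memsz 0x2000 spans
 * 0x1234..0x3234, so the pages covering [0x1000, 0x4000) + load_bias
 * are re-protected in their entirety. */
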
/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                             int phdr_count,
                             Elf32_Addr load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ);
}

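/* Illustrative ordering (a sketch, not part of the original file): RELRO
 * protection is applied only once relocations are done, e.g.:
 *
 *     phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
 *     phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);
 */
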
#ifdef ANDROID_ARM_LINKER

#  ifndef PT_ARM_EXIDX
#  define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int
phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
                         int phdr_count,
                         Elf32_Addr load_bias,
                         Elf32_Addr** arm_exidx,
                         unsigned* arm_exidx_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        /* Each .ARM.exidx entry is two 32-bit words, i.e. 8 bytes. */
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
#endif /* ANDROID_ARM_LINKER */

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or NULL if missing.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void
phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
                               int phdr_count,
                               Elf32_Addr load_bias,
                               Elf32_Dyn** dynamic,
                               size_t* dynamic_count,
                               Elf32_Word* dynamic_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        *dynamic = reinterpret_cast<Elf32_Dyn*>(load_bias + phdr->p_vaddr);
        if (dynamic_count) {
            /* Each Elf32_Dyn entry is 8 bytes: a 32-bit tag plus a 32-bit value. */
            *dynamic_count = (unsigned)(phdr->p_memsz / 8);
        }
        if (dynamic_flags) {
            *dynamic_flags = phdr->p_flags;
        }
        return;
    }
    *dynamic = NULL;
    if (dynamic_count) {
        *dynamic_count = 0;
    }
}

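/* Illustrative usage (a sketch, not part of the original file):
 *
 *     Elf32_Dyn* dynamic;
 *     size_t dynamic_count;
 *     phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
 *                                    &dynamic, &dynamic_count, NULL);
 *     if (dynamic != NULL) {
 *         // scan dynamic[0 .. dynamic_count) for the DT_* tags of interest
 *     }
 */
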
/* Return the address of the program header table as it appears in the loaded
 * segments in memory. This is in contrast with the input 'phdr_table', which
 * is temporary and will be released before the library is relocated.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   Address of the loaded program header table on success (it has
 *   'phdr_count' entries), or NULL on failure (no error code).
 */
const Elf32_Phdr*
phdr_table_get_loaded_phdr(const Elf32_Phdr* phdr_table,
                           int phdr_count,
                           Elf32_Addr load_bias)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;
    Elf32_Addr loaded = 0;
    Elf32_Addr loaded_end;

    /* If there is a PT_PHDR, use it directly */
    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_PHDR) {
            loaded = load_bias + phdr->p_vaddr;
            goto CHECK;
        }
    }

    /* Otherwise, check the first loadable segment. If its file offset
     * is 0, it starts with the ELF header, and we can trivially find the
     * loaded program header from it. */
    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_LOAD) {
            if (phdr->p_offset == 0) {
                Elf32_Addr  elf_addr = load_bias + phdr->p_vaddr;
                const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
                Elf32_Addr  offset = ehdr->e_phoff;
                loaded = (Elf32_Addr)ehdr + offset;
                goto CHECK;
            }
            break;
        }
    }

    /* We didn't find it, let the client know. The caller may be able to
     * keep a copy of the input phdr_table instead. */
    return NULL;

CHECK:
    /* Ensure that our program header is actually within a loadable
     * segment. This should help catch badly-formed ELF files that
     * would cause the linker to crash later when trying to access it.
     */
    loaded_end = loaded + phdr_count*sizeof(Elf32_Phdr);

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD)
            continue;
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end = phdr->p_filesz + seg_start;

        if (seg_start <= loaded && loaded_end <= seg_end) {
            return (const Elf32_Phdr*)loaded;
        }
    }
    return NULL;
}