blob: 3ffccecd7c4bc4d87a34194f7416b04b141524ee [file] [log] [blame]
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Implementation notes:
//
// We need to remove a piece from the ELF shared library. However, we also
// want to ensure that code and data loads at the same addresses as before
// packing, so that tools like breakpad can still match up addresses found
// in any crash dumps with data extracted from the pre-packed version of
// the shared library.
//
// Arranging this means that we have to split one of the LOAD segments into
// two. Unfortunately, the program headers are located at the very start
// of the shared library file, so expanding the program header section
// would cause a lot of consequent changes to file offsets that we don't
// really want to have to handle.
//
// Luckily, though, there is a segment that is always present and always
// unused on Android; the GNU_STACK segment. What we do is to steal that
// and repurpose it to be one of the split LOAD segments. We then have to
// sort LOAD segments by offset to keep the crazy linker happy.
//
// All of this takes place in SplitProgramHeadersForHole(), used on packing,
// and is unraveled on unpacking in CoalesceProgramHeadersForHole(). See
// commentary on those functions for an example of this segment stealing
// in action.
#include "elf_file.h"

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <string>
#include <vector>

#include "debug.h"
#include "elf_traits.h"
#include "libelf.h"
#include "packer.h"
42
namespace relocation_packer {

// Stub identifier written to 'null out' packed data, "NULL".
// (0x4c4c554e reads as the bytes 'N' 'U' 'L' 'L' when stored little-endian.)
static const uint32_t kStubIdentifier = 0x4c4c554eu;

// Out-of-band dynamic tags used to indicate the offset and size of the
// android packed relocations section. These live in the OS-specific
// tag range starting at DT_LOOS.
static const ELF::Sword DT_ANDROID_REL_OFFSET = DT_LOOS;
static const ELF::Sword DT_ANDROID_REL_SIZE = DT_LOOS + 1;

// Alignment to preserve, in bytes. This must be at least as large as the
// largest d_align and sh_addralign values found in the loaded file.
// Out of caution for RELRO page alignment, we preserve to a complete target
// page. See http://www.airs.com/blog/archives/189.
static const size_t kPreserveAlignment = 4096;
58
59namespace {
60
61// Get section data. Checks that the section has exactly one data entry,
62// so that the section size and the data size are the same. True in
63// practice for all sections we resize when packing or unpacking. Done
64// by ensuring that a call to elf_getdata(section, data) returns NULL as
65// the next data entry.
66Elf_Data* GetSectionData(Elf_Scn* section) {
67 Elf_Data* data = elf_getdata(section, NULL);
68 CHECK(data && elf_getdata(section, data) == NULL);
69 return data;
70}
71
72// Rewrite section data. Allocates new data and makes it the data element's
73// buffer. Relies on program exit to free allocated data.
74void RewriteSectionData(Elf_Scn* section,
75 const void* section_data,
76 size_t size) {
77 Elf_Data* data = GetSectionData(section);
78 CHECK(size == data->d_size);
79 uint8_t* area = new uint8_t[size];
80 memcpy(area, section_data, size);
81 data->d_buf = area;
82}
83
// Verbose ELF header logging.
// Dumps, at verbosity level 1, the main ELF header fields that packing
// cares about: the program/section header table offsets, entry sizes,
// entry counts, and the section name string table index.
void VerboseLogElfHeader(const ELF::Ehdr* elf_header) {
  VLOG(1) << "e_phoff = " << elf_header->e_phoff;
  VLOG(1) << "e_shoff = " << elf_header->e_shoff;
  VLOG(1) << "e_ehsize = " << elf_header->e_ehsize;
  VLOG(1) << "e_phentsize = " << elf_header->e_phentsize;
  VLOG(1) << "e_phnum = " << elf_header->e_phnum;
  VLOG(1) << "e_shnum = " << elf_header->e_shnum;
  VLOG(1) << "e_shstrndx = " << elf_header->e_shstrndx;
}
94
95// Verbose ELF program header logging.
96void VerboseLogProgramHeader(size_t program_header_index,
97 const ELF::Phdr* program_header) {
98 std::string type;
99 switch (program_header->p_type) {
100 case PT_NULL: type = "NULL"; break;
101 case PT_LOAD: type = "LOAD"; break;
102 case PT_DYNAMIC: type = "DYNAMIC"; break;
103 case PT_INTERP: type = "INTERP"; break;
104 case PT_PHDR: type = "PHDR"; break;
105 case PT_GNU_RELRO: type = "GNU_RELRO"; break;
106 case PT_GNU_STACK: type = "GNU_STACK"; break;
107 case PT_ARM_EXIDX: type = "EXIDX"; break;
108 default: type = "(OTHER)"; break;
109 }
110 VLOG(1) << "phdr[" << program_header_index << "] : " << type;
111 VLOG(1) << " p_offset = " << program_header->p_offset;
112 VLOG(1) << " p_vaddr = " << program_header->p_vaddr;
113 VLOG(1) << " p_paddr = " << program_header->p_paddr;
114 VLOG(1) << " p_filesz = " << program_header->p_filesz;
115 VLOG(1) << " p_memsz = " << program_header->p_memsz;
116 VLOG(1) << " p_flags = " << program_header->p_flags;
117 VLOG(1) << " p_align = " << program_header->p_align;
118}
119
// Verbose ELF section header logging.
// Prints the address/offset/size/alignment fields of one section header,
// labelled with the section's name.
void VerboseLogSectionHeader(const std::string& section_name,
                             const ELF::Shdr* section_header) {
  VLOG(1) << "section " << section_name;
  VLOG(1) << " sh_addr = " << section_header->sh_addr;
  VLOG(1) << " sh_offset = " << section_header->sh_offset;
  VLOG(1) << " sh_size = " << section_header->sh_size;
  VLOG(1) << " sh_addralign = " << section_header->sh_addralign;
}
129
// Verbose ELF section data logging.
// Prints one libelf data descriptor: buffer pointer, offset within the
// section, size, and alignment.
void VerboseLogSectionData(const Elf_Data* data) {
  VLOG(1) << " data";
  VLOG(1) << " d_buf = " << data->d_buf;
  VLOG(1) << " d_off = " << data->d_off;
  VLOG(1) << " d_size = " << data->d_size;
  VLOG(1) << " d_align = " << data->d_align;
}
138
139} // namespace
140
141// Load the complete ELF file into a memory image in libelf, and identify
142// the .rel.dyn or .rela.dyn, .dynamic, and .android.rel.dyn or
143// .android.rela.dyn sections. No-op if the ELF file has already been loaded.
144bool ElfFile::Load() {
145 if (elf_)
146 return true;
147
148 Elf* elf = elf_begin(fd_, ELF_C_RDWR, NULL);
149 CHECK(elf);
150
151 if (elf_kind(elf) != ELF_K_ELF) {
152 LOG(ERROR) << "File not in ELF format";
153 return false;
154 }
155
156 ELF::Ehdr* elf_header = ELF::getehdr(elf);
157 if (!elf_header) {
158 LOG(ERROR) << "Failed to load ELF header: " << elf_errmsg(elf_errno());
159 return false;
160 }
161 if (elf_header->e_machine != ELF::kMachine) {
162 LOG(ERROR) << "ELF file architecture is not " << ELF::Machine();
163 return false;
164 }
165 if (elf_header->e_type != ET_DYN) {
166 LOG(ERROR) << "ELF file is not a shared object";
167 return false;
168 }
169
170 // Require that our endianness matches that of the target, and that both
171 // are little-endian. Safe for all current build/target combinations.
172 const int endian = elf_header->e_ident[EI_DATA];
173 CHECK(endian == ELFDATA2LSB);
174 CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__);
175
176 // Also require that the file class is as expected.
177 const int file_class = elf_header->e_ident[EI_CLASS];
178 CHECK(file_class == ELF::kFileClass);
179
180 VLOG(1) << "endian = " << endian << ", file class = " << file_class;
181 VerboseLogElfHeader(elf_header);
182
183 const ELF::Phdr* elf_program_header = ELF::getphdr(elf);
184 CHECK(elf_program_header);
185
186 const ELF::Phdr* dynamic_program_header = NULL;
187 for (size_t i = 0; i < elf_header->e_phnum; ++i) {
188 const ELF::Phdr* program_header = &elf_program_header[i];
189 VerboseLogProgramHeader(i, program_header);
190
191 if (program_header->p_type == PT_DYNAMIC) {
192 CHECK(dynamic_program_header == NULL);
193 dynamic_program_header = program_header;
194 }
195 }
196 CHECK(dynamic_program_header != NULL);
197
198 size_t string_index;
199 elf_getshdrstrndx(elf, &string_index);
200
201 // Notes of the dynamic relocations, packed relocations, and .dynamic
202 // sections. Found while iterating sections, and later stored in class
203 // attributes.
204 Elf_Scn* found_relocations_section = NULL;
205 Elf_Scn* found_android_relocations_section = NULL;
206 Elf_Scn* found_dynamic_section = NULL;
207
208 // Notes of relocation section types seen. We require one or the other of
209 // these; both is unsupported.
210 bool has_rel_relocations = false;
211 bool has_rela_relocations = false;
212
213 Elf_Scn* section = NULL;
214 while ((section = elf_nextscn(elf, section)) != NULL) {
215 const ELF::Shdr* section_header = ELF::getshdr(section);
216 std::string name = elf_strptr(elf, string_index, section_header->sh_name);
217 VerboseLogSectionHeader(name, section_header);
218
219 // Note relocation section types.
220 if (section_header->sh_type == SHT_REL) {
221 has_rel_relocations = true;
222 }
223 if (section_header->sh_type == SHT_RELA) {
224 has_rela_relocations = true;
225 }
226
227 // Note special sections as we encounter them.
228 if ((name == ".rel.dyn" || name == ".rela.dyn") &&
229 section_header->sh_size > 0) {
230 found_relocations_section = section;
231 }
232 if ((name == ".android.rel.dyn" || name == ".android.rela.dyn") &&
233 section_header->sh_size > 0) {
234 found_android_relocations_section = section;
235 }
236 if (section_header->sh_offset == dynamic_program_header->p_offset) {
237 found_dynamic_section = section;
238 }
239
240 // Ensure we preserve alignment, repeated later for the data block(s).
241 CHECK(section_header->sh_addralign <= kPreserveAlignment);
242
243 Elf_Data* data = NULL;
244 while ((data = elf_getdata(section, data)) != NULL) {
245 CHECK(data->d_align <= kPreserveAlignment);
246 VerboseLogSectionData(data);
247 }
248 }
249
250 // Loading failed if we did not find the required special sections.
251 if (!found_relocations_section) {
252 LOG(ERROR) << "Missing or empty .rel.dyn or .rela.dyn section";
253 return false;
254 }
255 if (!found_android_relocations_section) {
256 LOG(ERROR) << "Missing or empty .android.rel.dyn or .android.rela.dyn "
257 << "section (to fix, run with --help and follow the "
258 << "pre-packing instructions)";
259 return false;
260 }
261 if (!found_dynamic_section) {
262 LOG(ERROR) << "Missing .dynamic section";
263 return false;
264 }
265
266 // Loading failed if we could not identify the relocations type.
267 if (!has_rel_relocations && !has_rela_relocations) {
268 LOG(ERROR) << "No relocations sections found";
269 return false;
270 }
271 if (has_rel_relocations && has_rela_relocations) {
272 LOG(ERROR) << "Multiple relocations sections with different types found, "
273 << "not currently supported";
274 return false;
275 }
276
277 elf_ = elf;
278 relocations_section_ = found_relocations_section;
279 dynamic_section_ = found_dynamic_section;
280 android_relocations_section_ = found_android_relocations_section;
281 relocations_type_ = has_rel_relocations ? REL : RELA;
282 return true;
283}
284
285namespace {
286
287// Helper for ResizeSection(). Adjust the main ELF header for the hole.
288void AdjustElfHeaderForHole(ELF::Ehdr* elf_header,
289 ELF::Off hole_start,
290 ssize_t hole_size) {
291 if (elf_header->e_phoff > hole_start) {
292 elf_header->e_phoff += hole_size;
293 VLOG(1) << "e_phoff adjusted to " << elf_header->e_phoff;
294 }
295 if (elf_header->e_shoff > hole_start) {
296 elf_header->e_shoff += hole_size;
297 VLOG(1) << "e_shoff adjusted to " << elf_header->e_shoff;
298 }
299}
300
301// Helper for ResizeSection(). Adjust all section headers for the hole.
302void AdjustSectionHeadersForHole(Elf* elf,
303 ELF::Off hole_start,
304 ssize_t hole_size) {
305 size_t string_index;
306 elf_getshdrstrndx(elf, &string_index);
307
308 Elf_Scn* section = NULL;
309 while ((section = elf_nextscn(elf, section)) != NULL) {
310 ELF::Shdr* section_header = ELF::getshdr(section);
311 std::string name = elf_strptr(elf, string_index, section_header->sh_name);
312
313 if (section_header->sh_offset > hole_start) {
314 section_header->sh_offset += hole_size;
315 VLOG(1) << "section " << name
316 << " sh_offset adjusted to " << section_header->sh_offset;
317 }
318 }
319}
320
321// Helper for ResizeSection(). Adjust the offsets of any program headers
322// that have offsets currently beyond the hole start.
323void AdjustProgramHeaderOffsets(ELF::Phdr* program_headers,
324 size_t count,
325 ELF::Phdr* ignored_1,
326 ELF::Phdr* ignored_2,
327 ELF::Off hole_start,
328 ssize_t hole_size) {
329 for (size_t i = 0; i < count; ++i) {
330 ELF::Phdr* program_header = &program_headers[i];
331
332 if (program_header == ignored_1 || program_header == ignored_2)
333 continue;
334
335 if (program_header->p_offset > hole_start) {
336 // The hole start is past this segment, so adjust offset.
337 program_header->p_offset += hole_size;
338 VLOG(1) << "phdr[" << i
339 << "] p_offset adjusted to "<< program_header->p_offset;
340 }
341 }
342}
343
344// Helper for ResizeSection(). Find the first loadable segment in the
345// file. We expect it to map from file offset zero.
346ELF::Phdr* FindFirstLoadSegment(ELF::Phdr* program_headers,
347 size_t count) {
348 ELF::Phdr* first_loadable_segment = NULL;
349
350 for (size_t i = 0; i < count; ++i) {
351 ELF::Phdr* program_header = &program_headers[i];
352
353 if (program_header->p_type == PT_LOAD &&
354 program_header->p_offset == 0 &&
355 program_header->p_vaddr == 0 &&
356 program_header->p_paddr == 0) {
357 first_loadable_segment = program_header;
358 }
359 }
360 LOG_IF(FATAL, !first_loadable_segment)
361 << "Cannot locate a LOAD segment with address and offset zero";
362
363 return first_loadable_segment;
364}
365
366// Helper for ResizeSection(). Find the PT_GNU_STACK segment, and check
367// that it contains what we expect so we can restore it on unpack if needed.
368ELF::Phdr* FindUnusedGnuStackSegment(ELF::Phdr* program_headers,
369 size_t count) {
370 ELF::Phdr* unused_segment = NULL;
371
372 for (size_t i = 0; i < count; ++i) {
373 ELF::Phdr* program_header = &program_headers[i];
374
375 if (program_header->p_type == PT_GNU_STACK &&
376 program_header->p_offset == 0 &&
377 program_header->p_vaddr == 0 &&
378 program_header->p_paddr == 0 &&
379 program_header->p_filesz == 0 &&
380 program_header->p_memsz == 0 &&
381 program_header->p_flags == (PF_R | PF_W) &&
382 program_header->p_align == ELF::kGnuStackSegmentAlignment) {
383 unused_segment = program_header;
384 }
385 }
386 LOG_IF(FATAL, !unused_segment)
387 << "Cannot locate the expected GNU_STACK segment";
388
389 return unused_segment;
390}
391
392// Helper for ResizeSection(). Find the segment that was the first loadable
393// one before we split it into two. This is the one into which we coalesce
394// the split segments on unpacking.
395ELF::Phdr* FindOriginalFirstLoadSegment(ELF::Phdr* program_headers,
396 size_t count) {
397 const ELF::Phdr* first_loadable_segment =
398 FindFirstLoadSegment(program_headers, count);
399
400 ELF::Phdr* original_first_loadable_segment = NULL;
401
402 for (size_t i = 0; i < count; ++i) {
403 ELF::Phdr* program_header = &program_headers[i];
404
405 // The original first loadable segment is the one that follows on from
406 // the one we wrote on split to be the current first loadable segment.
407 if (program_header->p_type == PT_LOAD &&
408 program_header->p_offset == first_loadable_segment->p_filesz) {
409 original_first_loadable_segment = program_header;
410 }
411 }
412 LOG_IF(FATAL, !original_first_loadable_segment)
413 << "Cannot locate the LOAD segment that follows a LOAD at offset zero";
414
415 return original_first_loadable_segment;
416}
417
// Helper for ResizeSection(). Find the section that contains the hole.
// Called after section headers have already been adjusted for the hole,
// so offsets are mapped back to the pre-adjustment layout for comparison.
Elf_Scn* FindSectionContainingHole(Elf* elf,
                                   ELF::Off hole_start,
                                   ssize_t hole_size) {
  Elf_Scn* section = NULL;
  Elf_Scn* last_unholed_section = NULL;

  while ((section = elf_nextscn(elf, section)) != NULL) {
    const ELF::Shdr* section_header = ELF::getshdr(section);

    // Because we get here after section headers have been adjusted for the
    // hole, we need to 'undo' that adjustment to give a view of the original
    // sections layout.
    ELF::Off offset = section_header->sh_offset;
    if (section_header->sh_offset >= hole_start) {
      offset -= hole_size;
    }

    // Remember the last section that starts at or before the hole start;
    // the holed section is the one immediately after it.
    if (offset <= hole_start) {
      last_unholed_section = section;
    }
  }
  LOG_IF(FATAL, !last_unholed_section)
      << "Cannot identify the section before the one containing the hole";

  // The section containing the hole is the one after the last one found
  // by the loop above.
  Elf_Scn* holed_section = elf_nextscn(elf, last_unholed_section);
  LOG_IF(FATAL, !holed_section)
      << "Cannot identify the section containing the hole";

  return holed_section;
}
451
// Helper for ResizeSection(). Find the last section contained in a segment.
// The segment's extent is taken from its (already rewritten) program
// header, while section offsets are mapped back to the pre-adjustment
// layout before comparing against it.
Elf_Scn* FindLastSectionInSegment(Elf* elf,
                                  ELF::Phdr* program_header,
                                  ELF::Off hole_start,
                                  ssize_t hole_size) {
  const ELF::Off segment_end =
      program_header->p_offset + program_header->p_filesz;

  Elf_Scn* section = NULL;
  Elf_Scn* last_section = NULL;

  while ((section = elf_nextscn(elf, section)) != NULL) {
    const ELF::Shdr* section_header = ELF::getshdr(section);

    // As above, 'undo' any section offset adjustment to give a view of the
    // original sections layout.
    ELF::Off offset = section_header->sh_offset;
    if (section_header->sh_offset >= hole_start) {
      offset -= hole_size;
    }

    // Track the last section whose (original) offset starts inside the
    // segment's file extent.
    if (offset < segment_end) {
      last_section = section;
    }
  }
  LOG_IF(FATAL, !last_section)
      << "Cannot identify the last section in the given segment";

  return last_section;
}
482
// Helper for ResizeSection(). Order loadable segments by their offsets.
// The crazy linker contains assumptions about loadable segment ordering,
// and it is better if we do not break them.
void SortOrderSensitiveProgramHeaders(ELF::Phdr* program_headers,
                                      size_t count) {
  std::vector<ELF::Phdr*> orderable;

  // Collect together orderable program headers. These are all the LOAD
  // segments, and any GNU_STACK that may be present (removed on packing,
  // but replaced on unpacking).
  for (size_t i = 0; i < count; ++i) {
    ELF::Phdr* program_header = &program_headers[i];

    if (program_header->p_type == PT_LOAD ||
        program_header->p_type == PT_GNU_STACK) {
      orderable.push_back(program_header);
    }
  }

  // Order these program headers so that any PT_GNU_STACK is last, and
  // the LOAD segments that precede it appear in offset order. Uses
  // insertion sort. Note that std::swap exchanges the header *contents*,
  // not the collected pointers, so the orderable entries keep their table
  // slots while their values are reordered in place.
  for (size_t i = 1; i < orderable.size(); ++i) {
    for (size_t j = i; j > 0; --j) {
      ELF::Phdr* first = orderable[j - 1];
      ELF::Phdr* second = orderable[j];

      // Stop sinking once the earlier entry is neither a GNU_STACK nor
      // at a larger offset than its successor.
      if (!(first->p_type == PT_GNU_STACK ||
            first->p_offset > second->p_offset)) {
        break;
      }
      std::swap(*first, *second);
    }
  }
}
518
// Helper for ResizeSection(). The GNU_STACK program header is unused in
// Android, so we can repurpose it here. Before packing, the program header
// table contains something like:
//
//   Type       Offset    VirtAddr   PhysAddr   FileSiz   MemSiz    Flg Align
//   LOAD       0x000000  0x00000000 0x00000000 0x1efc818 0x1efc818 R E 0x1000
//   LOAD       0x1efd008 0x01efe008 0x01efe008 0x17ec3c  0x1a0324  RW  0x1000
//   DYNAMIC    0x205ec50 0x0205fc50 0x0205fc50 0x00108   0x00108   RW  0x4
//   GNU_STACK  0x000000  0x00000000 0x00000000 0x00000   0x00000   RW  0
//
// The hole in the file is in the first of these. In order to preserve all
// load addresses, what we do is to turn the GNU_STACK into a new LOAD entry
// that maps segments up to where we created the hole, adjust the first LOAD
// entry so that it maps segments after that, adjust any other program
// headers whose offset is after the hole start, and finally order the LOAD
// segments by offset, to give:
//
//   Type       Offset    VirtAddr   PhysAddr   FileSiz   MemSiz    Flg Align
//   LOAD       0x000000  0x00000000 0x00000000 0x14ea4   0x14ea4   R E 0x1000
//   LOAD       0x014ea4  0x00212ea4 0x00212ea4 0x1cea164 0x1cea164 R E 0x1000
//   DYNAMIC    0x1e60c50 0x0205fc50 0x0205fc50 0x00108   0x00108   RW  0x4
//   LOAD       0x1cff008 0x01efe008 0x01efe008 0x17ec3c  0x1a0324  RW  0x1000
//
// We work out the split points by finding the .rel.dyn or .rela.dyn section
// that contains the hole, and by finding the last section in a given segment.
//
// To unpack, we reverse the above to leave the file as it was originally.
void SplitProgramHeadersForHole(Elf* elf,
                                ELF::Off hole_start,
                                ssize_t hole_size) {
  // Splitting only happens when shrinking the file (packing).
  CHECK(hole_size < 0);
  const ELF::Ehdr* elf_header = ELF::getehdr(elf);
  CHECK(elf_header);

  ELF::Phdr* elf_program_header = ELF::getphdr(elf);
  CHECK(elf_program_header);

  const size_t program_header_count = elf_header->e_phnum;

  // Locate the segment that we can overwrite to form the new LOAD entry,
  // and the segment that we are going to split into two parts.
  ELF::Phdr* spliced_header =
      FindUnusedGnuStackSegment(elf_program_header, program_header_count);
  ELF::Phdr* split_header =
      FindFirstLoadSegment(elf_program_header, program_header_count);

  VLOG(1) << "phdr[" << split_header - elf_program_header << "] split";
  VLOG(1) << "phdr[" << spliced_header - elf_program_header << "] new LOAD";

  // Find the section that contains the hole. We split on the section that
  // follows it.
  Elf_Scn* holed_section =
      FindSectionContainingHole(elf, hole_start, hole_size);

  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  ELF::Shdr* section_header = ELF::getshdr(holed_section);
  std::string name = elf_strptr(elf, string_index, section_header->sh_name);
  VLOG(1) << "section " << name << " split after";

  // Find the last section in the segment we are splitting.
  Elf_Scn* last_section =
      FindLastSectionInSegment(elf, split_header, hole_start, hole_size);

  section_header = ELF::getshdr(last_section);
  name = elf_strptr(elf, string_index, section_header->sh_name);
  VLOG(1) << "section " << name << " split end";

  // Split on the section following the holed one, and up to (but not
  // including) the section following the last one in the split segment.
  Elf_Scn* split_section = elf_nextscn(elf, holed_section);
  LOG_IF(FATAL, !split_section)
      << "No section follows the section that contains the hole";
  Elf_Scn* end_section = elf_nextscn(elf, last_section);
  LOG_IF(FATAL, !end_section)
      << "No section follows the last section in the segment being split";

  // Split the first portion of split_header into spliced_header.
  // The new first LOAD covers the file from offset zero up to the start
  // of split_section; it inherits type, flags, and alignment unchanged.
  const ELF::Shdr* split_section_header = ELF::getshdr(split_section);
  spliced_header->p_type = split_header->p_type;
  spliced_header->p_offset = split_header->p_offset;
  spliced_header->p_vaddr = split_header->p_vaddr;
  spliced_header->p_paddr = split_header->p_paddr;
  CHECK(split_header->p_filesz == split_header->p_memsz);
  spliced_header->p_filesz = split_section_header->sh_offset;
  spliced_header->p_memsz = split_section_header->sh_offset;
  spliced_header->p_flags = split_header->p_flags;
  spliced_header->p_align = split_header->p_align;

  // Now rewrite split_header to remove the part we spliced from it.
  // It starts at the file offset where the spliced part ends, maps from
  // split_section's (unchanged) virtual address, and runs to end_section.
  const ELF::Shdr* end_section_header = ELF::getshdr(end_section);
  split_header->p_offset = spliced_header->p_filesz;
  CHECK(split_header->p_vaddr == split_header->p_paddr);
  split_header->p_vaddr = split_section_header->sh_addr;
  split_header->p_paddr = split_section_header->sh_addr;
  CHECK(split_header->p_filesz == split_header->p_memsz);
  split_header->p_filesz =
      end_section_header->sh_offset - spliced_header->p_filesz;
  split_header->p_memsz =
      end_section_header->sh_offset - spliced_header->p_filesz;

  // Adjust the offsets of all program headers that are not one of the pair
  // we just created by splitting.
  AdjustProgramHeaderOffsets(elf_program_header,
                             program_header_count,
                             spliced_header,
                             split_header,
                             hole_start,
                             hole_size);

  // Finally, order loadable segments by offset/address. The crazy linker
  // contains assumptions about loadable segment ordering.
  SortOrderSensitiveProgramHeaders(elf_program_header,
                                   program_header_count);
}
635
// Helper for ResizeSection(). Undo the work of SplitProgramHeadersForHole():
// merge the two split LOAD segments back into one, and restore the
// repurposed entry to a canonical empty GNU_STACK.
void CoalesceProgramHeadersForHole(Elf* elf,
                                   ELF::Off hole_start,
                                   ssize_t hole_size) {
  // Coalescing only happens when growing the file (unpacking).
  CHECK(hole_size > 0);
  const ELF::Ehdr* elf_header = ELF::getehdr(elf);
  CHECK(elf_header);

  ELF::Phdr* elf_program_header = ELF::getphdr(elf);
  CHECK(elf_program_header);

  const size_t program_header_count = elf_header->e_phnum;

  // Locate the segment that we overwrote to form the new LOAD entry, and
  // the segment that we split into two parts on packing.
  ELF::Phdr* spliced_header =
      FindFirstLoadSegment(elf_program_header, program_header_count);
  ELF::Phdr* split_header =
      FindOriginalFirstLoadSegment(elf_program_header, program_header_count);

  VLOG(1) << "phdr[" << spliced_header - elf_program_header << "] stack";
  VLOG(1) << "phdr[" << split_header - elf_program_header << "] coalesce";

  // Find the last section in the second segment we are coalescing.
  Elf_Scn* last_section =
      FindLastSectionInSegment(elf, split_header, hole_start, hole_size);

  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  const ELF::Shdr* section_header = ELF::getshdr(last_section);
  std::string name = elf_strptr(elf, string_index, section_header->sh_name);
  VLOG(1) << "section " << name << " coalesced";

  // Rewrite the coalesced segment into split_header: it resumes mapping
  // from file offset zero and address zero, and extends to the end of the
  // last section in the segment.
  const ELF::Shdr* last_section_header = ELF::getshdr(last_section);
  split_header->p_offset = spliced_header->p_offset;
  CHECK(split_header->p_vaddr == split_header->p_paddr);
  split_header->p_vaddr = spliced_header->p_vaddr;
  split_header->p_paddr = spliced_header->p_vaddr;
  CHECK(split_header->p_filesz == split_header->p_memsz);
  split_header->p_filesz =
      last_section_header->sh_offset + last_section_header->sh_size;
  split_header->p_memsz =
      last_section_header->sh_offset + last_section_header->sh_size;

  // Reconstruct the original GNU_STACK segment into spliced_header,
  // matching the shape checked by FindUnusedGnuStackSegment().
  spliced_header->p_type = PT_GNU_STACK;
  spliced_header->p_offset = 0;
  spliced_header->p_vaddr = 0;
  spliced_header->p_paddr = 0;
  spliced_header->p_filesz = 0;
  spliced_header->p_memsz = 0;
  spliced_header->p_flags = PF_R | PF_W;
  spliced_header->p_align = ELF::kGnuStackSegmentAlignment;

  // Adjust the offsets of all program headers that are not one of the pair
  // we just coalesced.
  AdjustProgramHeaderOffsets(elf_program_header,
                             program_header_count,
                             spliced_header,
                             split_header,
                             hole_start,
                             hole_size);

  // Finally, order loadable segments by offset/address. The crazy linker
  // contains assumptions about loadable segment ordering.
  SortOrderSensitiveProgramHeaders(elf_program_header,
                                   program_header_count);
}
706
707// Helper for ResizeSection(). Rewrite program headers.
708void RewriteProgramHeadersForHole(Elf* elf,
709 ELF::Off hole_start,
710 ssize_t hole_size) {
711 // If hole_size is negative then we are removing a piece of the file, and
712 // we want to split program headers so that we keep the same addresses
713 // for text and data. If positive, then we are putting that piece of the
714 // file back in, so we coalesce the previously split program headers.
715 if (hole_size < 0)
716 SplitProgramHeadersForHole(elf, hole_start, hole_size);
717 else if (hole_size > 0)
718 CoalesceProgramHeadersForHole(elf, hole_start, hole_size);
719}
720
721// Helper for ResizeSection(). Locate and return the dynamic section.
722Elf_Scn* GetDynamicSection(Elf* elf) {
723 const ELF::Ehdr* elf_header = ELF::getehdr(elf);
724 CHECK(elf_header);
725
726 const ELF::Phdr* elf_program_header = ELF::getphdr(elf);
727 CHECK(elf_program_header);
728
729 // Find the program header that describes the dynamic section.
730 const ELF::Phdr* dynamic_program_header = NULL;
731 for (size_t i = 0; i < elf_header->e_phnum; ++i) {
732 const ELF::Phdr* program_header = &elf_program_header[i];
733
734 if (program_header->p_type == PT_DYNAMIC) {
735 dynamic_program_header = program_header;
736 }
737 }
738 CHECK(dynamic_program_header);
739
740 // Now find the section with the same offset as this program header.
741 Elf_Scn* dynamic_section = NULL;
742 Elf_Scn* section = NULL;
743 while ((section = elf_nextscn(elf, section)) != NULL) {
744 ELF::Shdr* section_header = ELF::getshdr(section);
745
746 if (section_header->sh_offset == dynamic_program_header->p_offset) {
747 dynamic_section = section;
748 }
749 }
750 CHECK(dynamic_section != NULL);
751
752 return dynamic_section;
753}
754
// Helper for ResizeSection(). Adjust the .dynamic section for the hole.
// Rel is either ELF::Rel or ELF::Rela, selecting which size/count tags
// apply. Works on a copy of the section's entries, then writes the copy
// back via RewriteSectionData().
template <typename Rel>
void AdjustDynamicSectionForHole(Elf_Scn* dynamic_section,
                                 ELF::Off hole_start,
                                 ssize_t hole_size) {
  Elf_Data* data = GetSectionData(dynamic_section);

  const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
  std::vector<ELF::Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));

  for (size_t i = 0; i < dynamics.size(); ++i) {
    ELF::Dyn* dynamic = &dynamics[i];
    const ELF::Sword tag = dynamic->d_tag;

    // DT_RELSZ or DT_RELASZ indicate the overall size of relocations.
    // Only one will be present. Adjust by hole size.
    if (tag == DT_RELSZ || tag == DT_RELASZ) {
      dynamic->d_un.d_val += hole_size;
      VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
              << " d_val adjusted to " << dynamic->d_un.d_val;
    }

    // DT_RELCOUNT or DT_RELACOUNT hold the count of relative relocations.
    // Only one will be present. Packing reduces it to the alignment
    // padding, if any; unpacking restores it to its former value. The
    // crazy linker does not use it, but we update it anyway.
    if (tag == DT_RELCOUNT || tag == DT_RELACOUNT) {
      // Cast sizeof to a signed type to avoid the division result being
      // promoted into an unsigned size_t.
      const ssize_t sizeof_rel = static_cast<ssize_t>(sizeof(Rel));
      dynamic->d_un.d_val += hole_size / sizeof_rel;
      VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
              << " d_val adjusted to " << dynamic->d_un.d_val;
    }

    // DT_RELENT and DT_RELAENT do not change, but make sure they are what
    // we expect. Only one will be present.
    if (tag == DT_RELENT || tag == DT_RELAENT) {
      CHECK(dynamic->d_un.d_val == sizeof(Rel));
    }
  }

  // Write the modified copy back over the section's data.
  void* section_data = &dynamics[0];
  size_t bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(dynamic_section, section_data, bytes);
}
803
// Resize a section. If the new size is larger than the current size, open
// up a hole by increasing file offsets that come after the hole. If smaller
// than the current size, remove the hole by decreasing those offsets.
// Rel is either ELF::Rel or ELF::Rela, forwarded to the dynamic-section
// adjustment when the section being resized is the dynamic relocations.
template <typename Rel>
void ResizeSection(Elf* elf, Elf_Scn* section, size_t new_size) {
  ELF::Shdr* section_header = ELF::getshdr(section);
  // Nothing to do if the section is already the requested size.
  if (section_header->sh_size == new_size)
    return;

  // Note if we are resizing the real dyn relocations.
  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);
  const std::string section_name =
      elf_strptr(elf, string_index, section_header->sh_name);
  const bool is_relocations_resize =
      (section_name == ".rel.dyn" || section_name == ".rela.dyn");

  // Require that the section size and the data size are the same. True
  // in practice for all sections we resize when packing or unpacking.
  Elf_Data* data = GetSectionData(section);
  CHECK(data->d_off == 0 && data->d_size == section_header->sh_size);

  // Require that the section is not zero-length (that is, has allocated
  // data that we can validly expand).
  CHECK(data->d_size && data->d_buf);

  // The hole starts at this section's offset; its (signed) size is the
  // growth or shrinkage being applied.
  const ELF::Off hole_start = section_header->sh_offset;
  const ssize_t hole_size = new_size - data->d_size;

  VLOG_IF(1, (hole_size > 0)) << "expand section size = " << data->d_size;
  VLOG_IF(1, (hole_size < 0)) << "shrink section size = " << data->d_size;

  // Resize the data and the section header.
  data->d_size += hole_size;
  section_header->sh_size += hole_size;

  // Add the hole size to all offsets in the ELF file that are after the
  // start of the hole. If the hole size is positive we are expanding the
  // section to create a new hole; if negative, we are closing up a hole.

  // Start with the main ELF header.
  ELF::Ehdr* elf_header = ELF::getehdr(elf);
  AdjustElfHeaderForHole(elf_header, hole_start, hole_size);

  // Adjust all section headers.
  AdjustSectionHeadersForHole(elf, hole_start, hole_size);

  // If resizing the dynamic relocations, rewrite the program headers to
  // either split or coalesce segments, and adjust dynamic entries to match.
  if (is_relocations_resize) {
    RewriteProgramHeadersForHole(elf, hole_start, hole_size);

    Elf_Scn* dynamic_section = GetDynamicSection(elf);
    AdjustDynamicSectionForHole<Rel>(dynamic_section, hole_start, hole_size);
  }
}
860
861// Find the first slot in a dynamics array with the given tag. The array
862// always ends with a free (unused) element, and which we exclude from the
863// search. Returns dynamics->size() if not found.
864size_t FindDynamicEntry(ELF::Sword tag,
865 std::vector<ELF::Dyn>* dynamics) {
866 // Loop until the penultimate entry. We exclude the end sentinel.
867 for (size_t i = 0; i < dynamics->size() - 1; ++i) {
868 if (dynamics->at(i).d_tag == tag)
869 return i;
870 }
871
872 // The tag was not found.
873 return dynamics->size();
874}
875
876// Replace the first free (unused) slot in a dynamics vector with the given
877// value. The vector always ends with a free (unused) element, so the slot
878// found cannot be the last one in the vector.
879void AddDynamicEntry(const ELF::Dyn& dyn,
880 std::vector<ELF::Dyn>* dynamics) {
881 const size_t slot = FindDynamicEntry(DT_NULL, dynamics);
882 if (slot == dynamics->size()) {
883 LOG(FATAL) << "No spare dynamic array slots found "
884 << "(to fix, increase gold's --spare-dynamic-tags value)";
885 }
886
887 // Replace this entry with the one supplied.
888 dynamics->at(slot) = dyn;
889 VLOG(1) << "dynamic[" << slot << "] overwritten with " << dyn.d_tag;
890}
891
892// Remove the element in the dynamics vector that matches the given tag with
893// unused slot data. Shuffle the following elements up, and ensure that the
894// last is the null sentinel.
895void RemoveDynamicEntry(ELF::Sword tag,
896 std::vector<ELF::Dyn>* dynamics) {
897 const size_t slot = FindDynamicEntry(tag, dynamics);
898 CHECK(slot != dynamics->size());
899
900 // Remove this entry by shuffling up everything that follows.
901 for (size_t i = slot; i < dynamics->size() - 1; ++i) {
902 dynamics->at(i) = dynamics->at(i + 1);
903 VLOG(1) << "dynamic[" << i
904 << "] overwritten with dynamic[" << i + 1 << "]";
905 }
906
907 // Ensure that the end sentinel is still present.
908 CHECK(dynamics->at(dynamics->size() - 1).d_tag == DT_NULL);
909}
910
911// Construct a null relocation without addend.
912void NullRelocation(ELF::Rel* relocation) {
913 relocation->r_offset = 0;
914 relocation->r_info = ELF_R_INFO(0, ELF::kNoRelocationCode);
915}
916
917// Construct a null relocation with addend.
918void NullRelocation(ELF::Rela* relocation) {
919 relocation->r_offset = 0;
920 relocation->r_info = ELF_R_INFO(0, ELF::kNoRelocationCode);
921 relocation->r_addend = 0;
922}
923
924// Pad relocations with the given number of null entries. Generates its
925// null entry with the appropriate NullRelocation() invocation.
926template <typename Rel>
927void PadRelocations(size_t count, std::vector<Rel>* relocations) {
928 Rel null_relocation;
929 NullRelocation(&null_relocation);
930 std::vector<Rel> padding(count, null_relocation);
931 relocations->insert(relocations->end(), padding.begin(), padding.end());
932}
933
934} // namespace
935
936// Remove relative entries from dynamic relocations and write as packed
937// data into android packed relocations.
938bool ElfFile::PackRelocations() {
939 // Load the ELF file into libelf.
940 if (!Load()) {
941 LOG(ERROR) << "Failed to load as ELF";
942 return false;
943 }
944
945 // Retrieve the current dynamic relocations section data.
946 Elf_Data* data = GetSectionData(relocations_section_);
947
948 if (relocations_type_ == REL) {
949 // Convert data to a vector of relocations.
950 const ELF::Rel* relocations_base = reinterpret_cast<ELF::Rel*>(data->d_buf);
951 std::vector<ELF::Rel> relocations(
952 relocations_base,
953 relocations_base + data->d_size / sizeof(relocations[0]));
954
955 LOG(INFO) << "Relocations : REL";
956 return PackTypedRelocations<ELF::Rel>(relocations);
957 }
958
959 if (relocations_type_ == RELA) {
960 // Convert data to a vector of relocations with addends.
961 const ELF::Rela* relocations_base =
962 reinterpret_cast<ELF::Rela*>(data->d_buf);
963 std::vector<ELF::Rela> relocations(
964 relocations_base,
965 relocations_base + data->d_size / sizeof(relocations[0]));
966
967 LOG(INFO) << "Relocations : RELA";
968 return PackTypedRelocations<ELF::Rela>(relocations);
969 }
970
971 NOTREACHED();
972 return false;
973}
974
// Helper for PackRelocations(). Rel type is one of ELF::Rel or ELF::Rela.
// Splits |relocations| into relative and other entries, packs the relative
// entries into the android packed relocations section, rewrites the dynamic
// relocations section to hold only the others (plus any alignment padding),
// and adds two dynamic tags describing the packed data. Returns false if
// there is nothing worth packing or packing would not save space.
template <typename Rel>
bool ElfFile::PackTypedRelocations(const std::vector<Rel>& relocations) {
  // Filter relocations into those that are relative and others.
  std::vector<Rel> relative_relocations;
  std::vector<Rel> other_relocations;

  for (size_t i = 0; i < relocations.size(); ++i) {
    const Rel& relocation = relocations[i];
    if (ELF_R_TYPE(relocation.r_info) == ELF::kRelativeRelocationCode) {
      // A relative relocation should never reference a symbol.
      CHECK(ELF_R_SYM(relocation.r_info) == 0);
      relative_relocations.push_back(relocation);
    } else {
      other_relocations.push_back(relocation);
    }
  }
  LOG(INFO) << "Relative : " << relative_relocations.size() << " entries";
  LOG(INFO) << "Other : " << other_relocations.size() << " entries";
  LOG(INFO) << "Total : " << relocations.size() << " entries";

  // If no relative relocations then we have nothing packable. Perhaps
  // the shared object has already been packed?
  if (relative_relocations.empty()) {
    LOG(ERROR) << "No relative relocations found (already packed?)";
    return false;
  }

  // If not padding fully, apply only enough padding to preserve alignment.
  // Otherwise, pad so that we do not shrink the relocations section at all.
  if (!is_padding_relocations_) {
    // Calculate the size of the hole we will close up when we rewrite
    // dynamic relocations.
    ssize_t hole_size =
        relative_relocations.size() * sizeof(relative_relocations[0]);
    const ssize_t unaligned_hole_size = hole_size;

    // Adjust the actual hole size to preserve alignment. We always adjust
    // by a whole number of NONE-type relocations. (This loop terminates:
    // hole_size starts as a multiple of sizeof(Rel) and shrinks by
    // sizeof(Rel) each pass, reaching zero at worst, and zero is divisible
    // by any alignment.)
    while (hole_size % kPreserveAlignment)
      hole_size -= sizeof(relative_relocations[0]);
    LOG(INFO) << "Compaction : " << hole_size << " bytes";

    // Adjusting for alignment may have removed any packing benefit.
    if (hole_size == 0) {
      LOG(INFO) << "Too few relative relocations to pack after alignment";
      return false;
    }

    // Find the padding needed in other_relocations to preserve alignment.
    // Ensure that we never completely empty the real relocations section.
    size_t padding_bytes = unaligned_hole_size - hole_size;
    if (padding_bytes == 0 && other_relocations.size() == 0) {
      do {
        padding_bytes += sizeof(relative_relocations[0]);
      } while (padding_bytes % kPreserveAlignment);
    }
    // Both vectors hold the same Rel type, so padding_bytes (built from
    // whole relative_relocations entries) divides exactly.
    CHECK(padding_bytes % sizeof(other_relocations[0]) == 0);
    const size_t padding = padding_bytes / sizeof(other_relocations[0]);

    // Padding may have removed any packing benefit.
    if (padding >= relative_relocations.size()) {
      LOG(INFO) << "Too few relative relocations to pack after padding";
      return false;
    }

    // Add null relocations to other_relocations to preserve alignment.
    PadRelocations<Rel>(padding, &other_relocations);
    LOG(INFO) << "Alignment pad : " << padding << " relocations";
  } else {
    // If padding, add NONE-type relocations to other_relocations to make it
    // the same size as the original relocations we read in. This makes
    // the ResizeSection() below a no-op.
    const size_t padding = relocations.size() - other_relocations.size();
    PadRelocations<Rel>(padding, &other_relocations);
  }

  // Pack relative relocations.
  const size_t initial_bytes =
      relative_relocations.size() * sizeof(relative_relocations[0]);
  LOG(INFO) << "Unpacked relative: " << initial_bytes << " bytes";
  std::vector<uint8_t> packed;
  RelocationPacker packer;
  packer.PackRelativeRelocations(relative_relocations, &packed);
  const void* packed_data = &packed[0];
  const size_t packed_bytes = packed.size() * sizeof(packed[0]);
  LOG(INFO) << "Packed relative: " << packed_bytes << " bytes";

  // If we have insufficient relative relocations to form a run then
  // packing fails.
  if (packed.empty()) {
    LOG(INFO) << "Too few relative relocations to pack";
    return false;
  }

  // Run a loopback self-test as a check that packing is lossless.
  std::vector<Rel> unpacked;
  packer.UnpackRelativeRelocations(packed, &unpacked);
  CHECK(unpacked.size() == relative_relocations.size());
  CHECK(!memcmp(&unpacked[0],
                &relative_relocations[0],
                unpacked.size() * sizeof(unpacked[0])));

  // Make sure packing saved some space.
  if (packed_bytes >= initial_bytes) {
    LOG(INFO) << "Packing relative relocations saves no space";
    return false;
  }

  // Rewrite the current dynamic relocations section to be only the
  // non-relative relocations, then shrink it to size.
  const void* section_data = &other_relocations[0];
  const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]);
  ResizeSection<Rel>(elf_, relocations_section_, bytes);
  RewriteSectionData(relocations_section_, section_data, bytes);

  // Rewrite the current packed android relocations section to hold the packed
  // relative relocations.
  ResizeSection<Rel>(elf_, android_relocations_section_, packed_bytes);
  RewriteSectionData(android_relocations_section_, packed_data, packed_bytes);

  // Rewrite .dynamic to include two new tags describing the packed android
  // relocations.
  Elf_Data* data = GetSectionData(dynamic_section_);
  const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
  std::vector<ELF::Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));
  // Use two of the spare slots to describe the packed section: its file
  // offset and its size in bytes, as finalized by ResizeSection() above.
  ELF::Shdr* section_header = ELF::getshdr(android_relocations_section_);
  {
    ELF::Dyn dyn;
    dyn.d_tag = DT_ANDROID_REL_OFFSET;
    dyn.d_un.d_ptr = section_header->sh_offset;
    AddDynamicEntry(dyn, &dynamics);
  }
  {
    ELF::Dyn dyn;
    dyn.d_tag = DT_ANDROID_REL_SIZE;
    dyn.d_un.d_val = section_header->sh_size;
    AddDynamicEntry(dyn, &dynamics);
  }
  const void* dynamics_data = &dynamics[0];
  const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(dynamic_section_, dynamics_data, dynamics_bytes);

  Flush();
  return true;
}
1123
1124// Find packed relative relocations in the packed android relocations
1125// section, unpack them, and rewrite the dynamic relocations section to
1126// contain unpacked data.
1127bool ElfFile::UnpackRelocations() {
1128 // Load the ELF file into libelf.
1129 if (!Load()) {
1130 LOG(ERROR) << "Failed to load as ELF";
1131 return false;
1132 }
1133
1134 // Retrieve the current packed android relocations section data.
1135 Elf_Data* data = GetSectionData(android_relocations_section_);
1136
1137 // Convert data to a vector of bytes.
1138 const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf);
1139 std::vector<uint8_t> packed(
1140 packed_base,
1141 packed_base + data->d_size / sizeof(packed[0]));
1142
1143 if (packed.size() > 3 &&
1144 packed[0] == 'A' &&
1145 packed[1] == 'P' &&
1146 packed[2] == 'R' &&
1147 packed[3] == '1') {
1148 // Signature is APR1, unpack relocations.
1149 CHECK(relocations_type_ == REL);
1150 LOG(INFO) << "Relocations : REL";
1151 return UnpackTypedRelocations<ELF::Rel>(packed);
1152 }
1153
1154 if (packed.size() > 3 &&
1155 packed[0] == 'A' &&
1156 packed[1] == 'P' &&
1157 packed[2] == 'A' &&
1158 packed[3] == '1') {
1159 // Signature is APA1, unpack relocations with addends.
1160 CHECK(relocations_type_ == RELA);
1161 LOG(INFO) << "Relocations : RELA";
1162 return UnpackTypedRelocations<ELF::Rela>(packed);
1163 }
1164
1165 LOG(ERROR) << "Packed relative relocations not found (not packed?)";
1166 return false;
1167}
1168
// Helper for UnpackRelocations(). Rel type is one of ELF::Rel or ELF::Rela.
// Unpacks |packed| back into relative relocations, rewrites the dynamic
// relocations section to hold relative relocations followed by the others,
// shrinks the android packed relocations section to a small stub, and
// removes the two dynamic tags that described the packed data.
template <typename Rel>
bool ElfFile::UnpackTypedRelocations(const std::vector<uint8_t>& packed) {
  // Unpack the data to re-materialize the relative relocations.
  const size_t packed_bytes = packed.size() * sizeof(packed[0]);
  LOG(INFO) << "Packed relative: " << packed_bytes << " bytes";
  std::vector<Rel> relative_relocations;
  RelocationPacker packer;
  packer.UnpackRelativeRelocations(packed, &relative_relocations);
  const size_t unpacked_bytes =
      relative_relocations.size() * sizeof(relative_relocations[0]);
  LOG(INFO) << "Unpacked relative: " << unpacked_bytes << " bytes";

  // Retrieve the current dynamic relocations section data.
  Elf_Data* data = GetSectionData(relocations_section_);

  // Interpret data as relocations.
  const Rel* relocations_base = reinterpret_cast<Rel*>(data->d_buf);
  std::vector<Rel> relocations(
      relocations_base,
      relocations_base + data->d_size / sizeof(relocations[0]));

  std::vector<Rel> other_relocations;
  size_t padding = 0;

  // Filter relocations to locate any that are NONE-type. These will occur
  // if padding was turned on for packing.
  for (size_t i = 0; i < relocations.size(); ++i) {
    const Rel& relocation = relocations[i];
    if (ELF_R_TYPE(relocation.r_info) != ELF::kNoRelocationCode) {
      other_relocations.push_back(relocation);
    } else {
      // NONE-type entries were padding added during packing; count them
      // but do not carry them into the rewritten section.
      ++padding;
    }
  }
  LOG(INFO) << "Relative : " << relative_relocations.size() << " entries";
  LOG(INFO) << "Other : " << other_relocations.size() << " entries";

  // If we found the same number of null relocation entries in the dynamic
  // relocations section as we hold as unpacked relative relocations, then
  // this is a padded file.
  const bool is_padded = padding == relative_relocations.size();

  // Unless padded, report by how much we expand the file.
  if (!is_padded) {
    // Calculate the size of the hole we will open up when we rewrite
    // dynamic relocations.
    ssize_t hole_size =
        relative_relocations.size() * sizeof(relative_relocations[0]);

    // Adjust the hole size for the padding added to preserve alignment.
    // (sizeof is evaluated on the element type, so this is well defined
    // even when other_relocations is empty.)
    hole_size -= padding * sizeof(other_relocations[0]);
    LOG(INFO) << "Expansion : " << hole_size << " bytes";
  }

  // Rewrite the current dynamic relocations section to be the relative
  // relocations followed by other relocations. This is the usual order in
  // which we find them after linking, so this action will normally put the
  // entire dynamic relocations section back to its pre-split-and-packed state.
  relocations.assign(relative_relocations.begin(), relative_relocations.end());
  relocations.insert(relocations.end(),
                     other_relocations.begin(), other_relocations.end());
  const void* section_data = &relocations[0];
  const size_t bytes = relocations.size() * sizeof(relocations[0]);
  LOG(INFO) << "Total : " << relocations.size() << " entries";
  ResizeSection<Rel>(elf_, relocations_section_, bytes);
  RewriteSectionData(relocations_section_, section_data, bytes);

  // Nearly empty the current packed android relocations section. Leaves a
  // four-byte stub so that some data remains allocated to the section.
  // This is a convenience which allows us to re-pack this file again without
  // having to remove the section and then add a new small one with objcopy.
  // The way we resize sections relies on there being some data in a section.
  ResizeSection<Rel>(
      elf_, android_relocations_section_, sizeof(kStubIdentifier));
  RewriteSectionData(
      android_relocations_section_, &kStubIdentifier, sizeof(kStubIdentifier));

  // Rewrite .dynamic to remove two tags describing packed android relocations.
  data = GetSectionData(dynamic_section_);
  const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
  std::vector<ELF::Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));
  RemoveDynamicEntry(DT_ANDROID_REL_OFFSET, &dynamics);
  RemoveDynamicEntry(DT_ANDROID_REL_SIZE, &dynamics);
  const void* dynamics_data = &dynamics[0];
  const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(dynamic_section_, dynamics_data, dynamics_bytes);

  Flush();
  return true;
}
1262
1263// Flush rewritten shared object file data.
1264void ElfFile::Flush() {
1265 // Flag all ELF data held in memory as needing to be written back to the
1266 // file, and tell libelf that we have controlled the file layout.
1267 elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY);
1268 elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT);
1269
1270 // Write ELF data back to disk.
1271 const off_t file_bytes = elf_update(elf_, ELF_C_WRITE);
1272 CHECK(file_bytes > 0);
1273 VLOG(1) << "elf_update returned: " << file_bytes;
1274
1275 // Clean up libelf, and truncate the output file to the number of bytes
1276 // written by elf_update().
1277 elf_end(elf_);
1278 elf_ = NULL;
1279 const int truncate = ftruncate(fd_, file_bytes);
1280 CHECK(truncate == 0);
1281}
1282
1283} // namespace relocation_packer