/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "linker_phdr.h"

#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "linker_debug.h"
#include "linker_utils.h"
static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}
/*
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not its p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap-around of UINT32_MAX for large p_vaddr values.)

  Note also that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.
 */
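// Editor's illustration (not part of the linker logic): the bias computation
// described above, as a minimal sketch. "phdr0_load_address" and "phdr0_vaddr"
// are hypothetical parameter names for the chosen mapping address and the
// first segment's p_vaddr.
static inline ElfW(Addr) example_load_bias(ElfW(Addr) phdr0_load_address,
                                           ElfW(Addr) phdr0_vaddr) {
  // For the example above: 0xa0030000 - PAGE_START(0x30000) == 0xa0000000.
  return phdr0_load_address - PAGE_START(phdr0_vaddr);
}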
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
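// Editor's illustration: PFLAGS_TO_PROT is a pure bit translation, so it can
// be checked at compile time. A typical text segment (PF_R | PF_X) must become
// PROT_READ | PROT_EXEC.
static_assert(PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC),
              "p_flags must translate directly to mmap protection bits");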
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}
bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }
  return did_read_;
}
bool ElfReader::Load(const android_dlextinfo* extinfo) {
  if (ReserveAddressSpace(extinfo) &&
      LoadSegments() &&
      FindPhdr()) {
    did_load_ = true;
  }
  return did_load_;
}
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);
  return strtab_ + index;
}
bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }
  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}
bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_.c_str(), header_.e_machine);
    return false;
  }

  return true;
}
bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size) {
  off64_t range_start;
  off64_t range_end;

  return safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         range_start < file_size_ &&
         range_end <= file_size_;
}
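// Editor's note (illustrative): the safe_add calls reject arithmetic
// overflow, so a malformed header field such as offset == UINT64_MAX cannot
// wrap around and appear to fit. For example, with file_offset_ == 0, a
// 100-byte table at offset 0x40 is accepted only when 0x40 + 100 <= file_size_.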
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // take up no more than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size)) {
    DL_ERR("\"%s\" has invalid phdr offset/size", name_.c_str());
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}
bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;
  if (shdr_num_ == 0) {
    DL_ERR("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size)) {
    DL_ERR("\"%s\" has invalid shdr offset/size", name_.c_str());
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}
bool ElfReader::ReadDynamicSection() {
  // 1. Find the .dynamic section (in the section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR("\"%s\" .dynamic section has invalid sh_link: %d", name_.c_str(), dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];
  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
           name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    PRINT("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
           name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}
/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }

  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
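// Illustrative use (editor's sketch; "phdrs" is hypothetical): compute the
// extent, then reserve it with a PROT_NONE mapping, as ReserveAddressSpace
// does below.
//   ElfW(Addr) min_vaddr;
//   size_t span = phdr_table_get_load_size(phdrs, phdr_count, &min_vaddr, nullptr);
//   void* start = mmap(nullptr, span, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);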
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;
  bool strict_hint = false;
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_.c_str());
      return false;
    }
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    start = mmap(mmap_hint, load_size_, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}
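// Illustrative caller-side sketch (editor's example; "region" and
// "region_size" are hypothetical): a host that has already reserved a range
// can hand it to the loader through android_dlextinfo:
//   android_dlextinfo ext = {};
//   ext.flags = ANDROID_DLEXT_RESERVED_ADDRESS;
//   ext.reserved_addr = region;
//   ext.reserved_size = region_size;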
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              PFLAGS_TO_PROT(phdr->p_flags),
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }
    }
  }
  return true;
}
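// Editor's example: with 4096-byte pages, the second segment from the
// technical note above (filesz:0x2000, memsz:0x8000, vaddr:0x40000) has file
// content ending at the bias-adjusted 0x42000 (already page-aligned), so the
// anonymous zero map covers the remaining [0x42000, 0x48000) of the range.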
/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (in practice, PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}
/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}
/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
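// Typical sequence (editor's sketch of the protocol described above; names
// as in the earlier examples):
//   phdr_table_unprotect_segments(phdrs, count, bias);  // segments writable
//   ... apply relocations ...
//   phdr_table_protect_segments(phdrs, count, bias);    // restore protections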
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and mark every page touched by the segment as read-only.
    //
    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.
    //
    // Note that the current dynamic linker code will only work
    // correctly if the PT_GNU_RELRO segment starts on a page
    // boundary. This is because the dynamic linker rounds the
    // p_vaddr field down to the previous page boundary. If
    // there is anything on the page which should not be read-only,
    // the program is likely to fail at runtime. So in effect the
    // linker must only emit a PT_GNU_RELRO segment if it ensures
    // that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}
/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's point of view, but contain absolute
 * addresses that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
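// Editor's note: in the loader's sequence this runs once relocations are
// final, e.g.
//   ... relocate ...
//   phdr_table_protect_gnu_relro(phdrs, count, bias);  // lock .got/.data.rel.ro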
/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}
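// Editor's sketch of the cross-process flow described in the doc comments
// (fd plumbing between the processes is hypothetical):
//   // In the first process, after relocation:
//   phdr_table_serialize_gnu_relro(phdrs, count, bias, fd);
//   // In a later process that loaded the library at the same address:
//   phdr_table_map_gnu_relro(phdrs, count, bias, fd);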
/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages. The file offset must account for any earlier
      // relro segments already consumed from the file.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif
/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (no error code in errno).
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;  // each .ARM.exidx entry is two 32-bit words
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif
/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags != nullptr) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}
/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}
// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}
// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}