1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "crazy_linker_elf_loader.h"
7 #include <limits.h> // For PAGE_SIZE and PAGE_MASK
9 #include "crazy_linker_debug.h"
10 #include "linker_phdr.h"
// Page-alignment helpers built on PAGE_SIZE/PAGE_MASK from <limits.h>.
// PAGE_START: round an address down to the start of its page.
12 #define PAGE_START(x) ((x) & PAGE_MASK)
// PAGE_OFFSET: byte offset of an address within its page.
13 #define PAGE_OFFSET(x) ((x) & ~PAGE_MASK)
// PAGE_END: round an address up to the next page boundary (identity when
// already page-aligned).
14 #define PAGE_END(x) PAGE_START((x) + (PAGE_SIZE - 1))
// Translate ELF segment permission bits (PF_X/PF_R/PF_W) into the
// corresponding mmap()/mprotect() PROT_* bits.
18 #define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
19 #define PFLAGS_TO_PROT(x) \
20 (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
21 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
22 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
// Constructor. |no_map_exec_support_fallback_enabled| controls whether
// LoadSegments() may fall back to copying executable segments into
// anonymous memory when mapping them PROT_EXEC directly from the file
// fails with EACCES (see LoadSegments below).
// NOTE(review): the initializer list appears truncated in this dump —
// other members are presumably zero-initialized here too; confirm
// against the upstream source.
24 ElfLoader::ElfLoader(bool no_map_exec_support_fallback_enabled)
32 wanted_load_address_(0),
37 no_map_exec_support_fallback_enabled_(
38 no_map_exec_support_fallback_enabled) {}
// Destructor: releases the temporary program-header mapping created by
// ReadProgramHeader(). No other cleanup is visible in this dump; the
// reserved load range is not unmapped here.
40 ElfLoader::~ElfLoader() {
42 // Deallocate the temporary program header copy.
43 munmap(phdr_mmap_, phdr_size_);
// Loads the ELF file at |lib_path| (starting at |file_offset| within the
// file) into the process, optionally at a fixed, page-aligned
// |wanted_address|. Validates alignment, opens the file, reads and checks
// the ELF header, maps the program header table, reserves the destination
// address range, then maps all loadable segments. On failure, sets
// |error| and releases any range reserved by ReserveAddressSpace().
// NOTE(review): several lines (returns/closing braces) are missing from
// this dump; comments below describe the visible control flow only.
47 bool ElfLoader::LoadAt(const char* lib_path,
49 uintptr_t wanted_address,
52 LOG("%s: lib_path='%s', file_offset=%p, load_address=%p\n",
58 // Check that the load address is properly page-aligned.
59 if (wanted_address != PAGE_START(wanted_address)) {
60 error->Format("Load address is not page aligned (%08x)", wanted_address);
63 wanted_load_address_ = reinterpret_cast<void*>(wanted_address);
65 // Check that the file offset is also properly page-aligned.
66 // PAGE_START() can't be used here due to the compiler complaining about
67 // comparing signed (off_t) and unsigned (size_t) values.
68 if ((file_offset & static_cast<off_t>(PAGE_SIZE - 1)) != 0) {
69 error->Format("File offset is not page aligned (%08x)", file_offset);
72 file_offset_ = file_offset;
// Open the library read-only; all later reads and maps go through fd_.
75 if (!fd_.OpenReadOnly(lib_path)) {
76 error->Format("Can't open file: %s", strerror(errno));
80 if (file_offset && fd_.SeekTo(file_offset) < 0) {
82 "Can't seek to file offset %08x: %s", file_offset, strerror(errno));
// Parse headers and reserve the destination range before mapping segments.
88 if (!ReadElfHeader(error) || !ReadProgramHeader(error) ||
89 !ReserveAddressSpace(error)) {
93 if (!LoadSegments(error) || !FindPhdr(error)) {
94 // An error occurred, cleanup the address space by un-mapping the
95 // range that was reserved by ReserveAddressSpace().
96 if (load_start_ && load_size_)
97 munmap(load_start_, load_size_);
// Reads the ELF file header into header_ and validates it: magic bytes,
// ELF class (32/64-bit), endianness, object type (must be ET_DYN),
// version and machine. On any mismatch, sets |error| and fails.
// NOTE(review): the 'return false;' lines and closing braces are missing
// from this dump.
105 bool ElfLoader::ReadElfHeader(Error* error) {
106 int ret = fd_.Read(&header_, sizeof(header_));
// A negative read is an I/O error; a short read means a truncated file.
108 error->Format("Can't read file: %s", strerror(errno));
111 if (ret != static_cast<int>(sizeof(header_))) {
112 error->Set("File too small to be ELF");
// Verify the "\x7fELF" magic bytes.
116 if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
117 error->Set("Bad ELF magic");
// Reject a 32/64-bit class mismatch against the class this linker
// was built for.
121 if (header_.e_ident[EI_CLASS] != ELF::kElfClass) {
122 error->Format("Not a %d-bit class: %d",
124 header_.e_ident[EI_CLASS]);
// Only little-endian objects are supported.
128 if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
129 error->Format("Not little-endian class: %d", header_.e_ident[EI_DATA]);
// Only shared libraries (ET_DYN) can be loaded.
133 if (header_.e_type != ET_DYN) {
134 error->Format("Not a shared library type: %d", header_.e_type);
138 if (header_.e_version != EV_CURRENT) {
139 error->Format("Unexpected ELF version: %d", header_.e_version);
// Reject objects built for a different CPU architecture.
143 if (header_.e_machine != ELF_MACHINE) {
144 error->Format("Unexpected ELF machine type: %d", header_.e_machine);
151 // Loads the program header table from an ELF file into a read-only private
152 // anonymous mmap-ed block.
// On success sets phdr_num_/phdr_size_/phdr_mmap_/phdr_table_; the
// mapping is released in the destructor. Returns false with |error|
// set on failure.
153 bool ElfLoader::ReadProgramHeader(Error* error) {
154 phdr_num_ = header_.e_phnum;
156 // Like the kernel, only accept program header tables smaller than 64 KB.
157 if (phdr_num_ < 1 || phdr_num_ > 65536 / sizeof(ELF::Phdr)) {
158 error->Format("Invalid program header count: %d", phdr_num_);
// Compute the page-aligned window of the file covering the whole table,
// and the table's offset within that window.
162 ELF::Addr page_min = PAGE_START(header_.e_phoff);
164 PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ELF::Phdr)));
165 ELF::Addr page_offset = PAGE_OFFSET(header_.e_phoff);
167 phdr_size_ = page_max - page_min;
// Map the window read-only; file_offset_ accounts for a library that
// starts at a non-zero offset passed to LoadAt().
169 void* mmap_result = fd_.Map(
170 NULL, phdr_size_, PROT_READ, MAP_PRIVATE, page_min + file_offset_);
171 if (mmap_result == MAP_FAILED) {
172 error->Format("Phdr mmap failed: %s", strerror(errno));
176 phdr_mmap_ = mmap_result;
177 phdr_table_ = reinterpret_cast<ELF::Phdr*>(
178 reinterpret_cast<char*>(mmap_result) + page_offset);
182 // Reserve a virtual address range big enough to hold all loadable
183 // segments of a program header table. This is done by creating a
184 // private anonymous mmap() with PROT_NONE.
186 // This will use the wanted_load_address_ value. Fails if the requested
187 // address range cannot be reserved. Typically this would be because
188 // it overlaps an existing, possibly system, mapping.
189 bool ElfLoader::ReserveAddressSpace(Error* error) {
// Compute the total span of all loadable segments and their minimum
// virtual address. NOTE(review): the assignment of load_size_ (and the
// declaration of min_vaddr) is on lines missing from this dump —
// presumably the call's return value; confirm against linker_phdr.h.
192 phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr, NULL);
193 if (load_size_ == 0) {
194 error->Set("No loadable segments");
198 uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
199 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
201 // Support loading at a fixed address.
202 if (wanted_load_address_) {
203 addr = static_cast<uint8_t*>(wanted_load_address_);
206 LOG("%s: address=%p size=%p\n", __FUNCTION__, addr, load_size_);
// Reserve the range with PROT_NONE; LoadSegments() later maps the real
// segment contents over it with MAP_FIXED.
207 void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
208 if (start == MAP_FAILED) {
209 error->Format("Could not reserve %d bytes of address space", load_size_);
// When a fixed address was requested, getting placed elsewhere is a
// failure: release the mapping and back out.
212 if (wanted_load_address_ && start != addr) {
213 error->Format("Could not map at %p requested, backing out", addr);
214 munmap(start, load_size_);
// load_bias_ is the delta added to a segment's p_vaddr to obtain its
// actual address in this process.
219 load_bias_ = reinterpret_cast<ELF::Addr>(start) - min_vaddr;
223 // Returns the address of the program header table as it appears in the loaded
224 // segments in memory. This is in contrast with 'phdr_table_' which
225 // is temporary and will be released before the library is relocated.
// Delegates validation/recording of the found address to CheckPhdr().
226 bool ElfLoader::FindPhdr(Error* error) {
227 const ELF::Phdr* phdr_limit = phdr_table_ + phdr_num_;
229 // If there is a PT_PHDR, use it directly.
230 for (const ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
231 if (phdr->p_type == PT_PHDR) {
232 return CheckPhdr(load_bias_ + phdr->p_vaddr, error);
236 // Otherwise, check the first loadable segment. If its file offset
237 // is 0, it starts with the ELF header, and we can trivially find the
238 // loaded program header from it.
239 for (const ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
240 if (phdr->p_type == PT_LOAD) {
241 if (phdr->p_offset == 0) {
242 ELF::Addr elf_addr = load_bias_ + phdr->p_vaddr;
// The segment begins with the in-memory ELF header, whose e_phoff
// field gives the offset of the program header table.
243 const ELF::Ehdr* ehdr = (const ELF::Ehdr*)(void*)elf_addr;
244 ELF::Addr offset = ehdr->e_phoff;
245 return CheckPhdr((ELF::Addr)ehdr + offset, error);
251 error->Set("Can't find loaded program header");
255 // Ensures that our program header is actually within a loadable
256 // segment. This should help catch badly-formed ELF files that
257 // would cause the linker to crash later when trying to access it.
// On success records |loaded| in loaded_phdr_ and returns true;
// otherwise sets |error| and fails.
258 bool ElfLoader::CheckPhdr(ELF::Addr loaded, Error* error) {
259 const ELF::Phdr* phdr_limit = phdr_table_ + phdr_num_;
// The candidate table occupies [loaded, loaded_end).
260 ELF::Addr loaded_end = loaded + (phdr_num_ * sizeof(ELF::Phdr));
261 for (ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
262 if (phdr->p_type != PT_LOAD) {
// Only the file-backed part of the segment counts (p_filesz, not
// p_memsz) — the table must come from the file, not from zero-fill.
265 ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
266 ELF::Addr seg_end = phdr->p_filesz + seg_start;
267 if (seg_start <= loaded && loaded_end <= seg_end) {
268 loaded_phdr_ = reinterpret_cast<const ELF::Phdr*>(loaded);
272 error->Format("Loaded program header %x not in loadable segment", loaded);
276 // Map all loadable segments in process' address space.
277 // This assumes you already called phdr_table_reserve_memory to
278 // reserve the address space range for the library.
// NOTE(review): this dump is missing lines (returns, closing braces,
// some call arguments) and the function continues past the visible end;
// the comments below describe only the visible logic.
279 bool ElfLoader::LoadSegments(Error* error) {
280 for (size_t i = 0; i < phdr_num_; ++i) {
281 const ELF::Phdr* phdr = &phdr_table_[i];
283 if (phdr->p_type != PT_LOAD) {
287 // Segment addresses in memory.
288 ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
289 ELF::Addr seg_end = seg_start + phdr->p_memsz;
// Page-aligned extent of the whole segment in memory.
291 ELF::Addr seg_page_start = PAGE_START(seg_start);
292 ELF::Addr seg_page_end = PAGE_END(seg_end);
// End of the file-backed portion; anything up to seg_end beyond this
// is zero-fill (see the anonymous map at the bottom).
294 ELF::Addr seg_file_end = seg_start + phdr->p_filesz;
// File offsets: the mapping must start on a page boundary in the file.
297 ELF::Addr file_start = phdr->p_offset;
298 ELF::Addr file_end = file_start + phdr->p_filesz;
300 ELF::Addr file_page_start = PAGE_START(file_start);
301 ELF::Addr file_length = file_end - file_page_start;
303 LOG("%s: file_offset=%p file_length=%p start_address=%p end_address=%p\n",
305 file_offset_ + file_page_start,
308 seg_page_start + PAGE_END(file_length));
// Map the file-backed portion of the segment over the reserved range.
310 if (file_length != 0) {
311 const int prot_flags = PFLAGS_TO_PROT(phdr->p_flags);
312 void* seg_addr = fd_.Map((void*)seg_page_start,
315 MAP_FIXED | MAP_PRIVATE,
316 file_page_start + file_offset_);
317 if (seg_addr == MAP_FAILED) {
// Only EACCES on an executable mapping (with the fallback enabled)
// is recoverable, by copying the code instead of file-mapping it.
318 if (!no_map_exec_support_fallback_enabled_ || errno != EACCES ||
319 !(prot_flags & PROT_EXEC)) {
320 // We don't have a fallback in this case.
321 error->Format("Could not map segment %d: %s", i, strerror(errno));
325 // We were unable to map executable code from the file directly.
326 // This can happen because overly strict SELinux settings prevent
327 // mapping executable code directly. We fallback by copying the
328 // executable code into memory.
330 // Cast away the const (we are making the pages writable).
331 seg_addr = (void*)seg_page_start;
333 // Add PROT_WRITE to the pages. Note that even though the above Map()
334 // failed these pages have already been mapped MAP_ANONYMOUS by
335 // ReserveAddressSpace, so at this point we just use mprotect.
336 if (mprotect(seg_addr, file_length, prot_flags | PROT_WRITE) == -1) {
337 error->Format("mprotect failed to add PROT_WRITE %d: %s",
342 // Map the library for READ.
343 void* lib_addr = fd_.Map(NULL,
347 file_page_start + file_offset_);
348 if (lib_addr == MAP_FAILED) {
350 "Could not map segment (PROT_READ) %d: %s",
355 // Copy the library into the desired location in memory.
356 memcpy(seg_addr, lib_addr, file_length);
358 // Unmap the library.
359 if (munmap(lib_addr, file_length) == -1) {
360 error->Format("Failed to unmap the library segment %d: %s",
// Restore the segment's intended protections now that the copy is done.
365 if (!(prot_flags & PROT_WRITE)) {
366 // Remove write permissions (PROT_WRITE).
367 if (mprotect(seg_addr, file_length, prot_flags) == -1) {
368 error->Format("mprotect failed to remove PROT_WRITE %d: %s",
376 // if the segment is writable, and does not end on a page boundary,
377 // zero-fill it until the page limit.
378 if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
379 memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
382 seg_file_end = PAGE_END(seg_file_end);
384 // seg_file_end is now the first page address after the file
385 // content. If seg_end is larger, we need to zero anything
386 // between them. This is done by using a private anonymous
387 // map for all extra pages.
388 if (seg_page_end > seg_file_end) {
389 void* zeromap = mmap((void*)seg_file_end,
390 seg_page_end - seg_file_end,
391 PFLAGS_TO_PROT(phdr->p_flags),
392 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
395 if (zeromap == MAP_FAILED) {
396 error->Format("Could not zero-fill gap: %s", strerror(errno));