2 Copyright (C) 2008-2010 Red Hat, Inc.
3 This file is part of elfutils.
5 This file is free software; you can redistribute it and/or modify
6 it under the terms of either
8 * the GNU Lesser General Public License as published by the Free
9 Software Foundation; either version 3 of the License, or (at
10 your option) any later version
14 * the GNU General Public License as published by the Free
15 Software Foundation; either version 2 of the License, or (at
16 your option) any later version
18 or both in parallel, as here.
20 elfutils is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received copies of the GNU General Public License and
26 the GNU Lesser General Public License along with this program. If
27 not, see <http://www.gnu.org/licenses/>. */
30 #include "../libelf/libelfP.h" /* For NOTE_ALIGN. */
35 #include <sys/param.h>
42 /* This is a prototype of what a new libelf interface might be.
43 This implementation is pessimal for non-mmap cases and should
44 be replaced by more diddling inside libelf internals. */
/* Open the sub-file of PARENT that lives at [OFFSET, OFFSET+SIZE).
   On success, *NEXT is advanced to just past the sub-file; the helper
   comment below says a failure return points *NEXT back at OFFSET.
   NOTE(review): this is an elided excerpt -- the return type line,
   braces, and some declarations (e.g. `endp') are not visible here.  */
46 elf_begin_rand (Elf *parent, loff_t offset, loff_t size, loff_t *next)
51 /* On failure return, we update *NEXT to point back at OFFSET. */
/* Local helper: translate a libelf error code into the libdwfl error
   state, then (presumably, in the elided lines) return NULL.  */
52 inline Elf *fail (int error)
56 //__libelf_seterrno (error);
57 __libdwfl_seterrno (DWFL_E (LIBELF, error));
/* The smallest offset that could start a valid sub-file depends on the
   container: past the ELF header for ELF_K_ELF (class-dependent size),
   past the !<arch> magic (SARMAG) for ELF_K_AR.  */
61 loff_t min = (parent->kind == ELF_K_ELF ?
62 (parent->class == ELFCLASS32
63 ? sizeof (Elf32_Ehdr) : sizeof (Elf64_Ehdr))
64 : parent->kind == ELF_K_AR ? SARMAG
/* Reject offsets inside the container's own header or past its end.  */
67 if (unlikely (offset < min)
68 || unlikely (offset >= (loff_t) parent->maximum_size))
69 return fail (ELF_E_RANGE);
71 /* For an archive, fetch just the size field
72 from the archive header to override SIZE. */
73 if (parent->kind == ELF_K_AR)
75 struct ar_hdr h = { .ar_size = "" };
/* A full ar_hdr must fit between OFFSET and the end of the file.  */
77 if (unlikely (parent->maximum_size - offset < sizeof h))
78 return fail (ELF_E_RANGE);
/* Read ar_size either straight out of the mapping or via pread.
   NOTE(review): the memcpy's length/source-offset arguments are on an
   elided line; the pread branch reads sizeof (h.ar_size) bytes from
   offsetof (struct ar_hdr, ar_size), so the memcpy presumably matches.  */
80 if (parent->map_address != NULL)
81 memcpy (h.ar_size, parent->map_address + parent->start_offset + offset,
83 else if (unlikely (pread_retry (parent->fildes,
84 h.ar_size, sizeof (h.ar_size),
85 parent->start_offset + offset
86 + offsetof (struct ar_hdr, ar_size))
87 != sizeof (h.ar_size)))
88 return fail (ELF_E_READ_ERROR);
/* ar_size is decimal ASCII; an empty parse or a size that runs past
   the end of the archive means a corrupt member header.  */
93 size = strtoll (h.ar_size, &endp, 10);
94 if (unlikely (endp == h.ar_size)
95 || unlikely ((loff_t) parent->maximum_size - offset < size))
96 return fail (ELF_E_INVALID_ARCHIVE);
/* Same bound check for the caller-supplied SIZE in the non-archive case.  */
99 if (unlikely ((loff_t) parent->maximum_size - offset < size))
100 return fail (ELF_E_RANGE);
102 /* Even if we fail at this point, update *NEXT to point past the file. */
104 *next = offset + size;
/* Whole-file case: no sub-window needed, just clone the parent handle.  */
106 if (unlikely (offset == 0)
107 && unlikely (size == (loff_t) parent->maximum_size))
108 return elf_clone (parent, parent->cmd);
110 /* Note the image is guaranteed live only as long as PARENT
111 lives. Using elf_memory is quite suboptimal if the whole
112 file is not mmap'd. We really should have something like
113 a generalization of the archive support. */
114 Elf_Data *data = elf_getdata_rawchunk (parent, offset, size, ELF_T_BYTE);
/* NOTE(review): the NULL check on DATA is in elided lines; the assert
   only guards the size invariant.  */
117 assert ((loff_t) data->d_size == size);
118 return elf_memory (data->d_buf, size);
/* Report every PT_LOAD program header of ELF as a Dwfl segment.
   *NOTES is an out-parameter: it is initialized to PT_NULL so the
   caller can tell whether a phdr of interest was stored into it by the
   elided switch cases (presumably the PT_NOTE case -- the caller in
   this file checks notes_phdr.p_type == PT_NOTE).
   NOTE(review): the loop body is elided; only the gelf_getphdr error
   path and the dwfl_report_segment call are visible, and the `result'
   declaration/return are not shown.  */
123 dwfl_report_core_segments (Dwfl *dwfl, Elf *elf, size_t phnum, GElf_Phdr *notes)
125 if (unlikely (dwfl == NULL))
/* Sentinel: PT_NULL means "no interesting phdr found yet".  */
131 notes->p_type = PT_NULL;
/* Stop early once any iteration reports an error (result < 0).  */
133 for (size_t ndx = 0; result >= 0 && ndx < phnum; ++ndx)
136 GElf_Phdr *phdr = gelf_getphdr (elf, ndx, &phdr_mem);
137 if (unlikely (phdr == NULL))
139 __libdwfl_seterrno (DWFL_E_LIBELF);
142 switch (phdr->p_type)
145 result = dwfl_report_segment (dwfl, ndx, phdr, 0, NULL);
161 /* Never read more than this much without mmap. */
162 #define MAX_EAGER_COST 8192
/* Dwfl_Module_Callback deciding whether to keep the module image that
   was (partially) sniffed out of the core file, or to discard it in
   favor of finding the real file elsewhere.  Returns nonzero to keep
   the Elf handle it stores in *ELFP.
   NOTE(review): elided excerpt -- the declarations of `whole' and
   `core' (presumably derived from ARG, the core Elf handle passed at
   the dwfl_segment_report_module call site) are not visible here.  */
165 core_file_read_eagerly (Dwfl_Module *mod,
166 void **userdata __attribute__ ((unused)),
167 const char *name __attribute__ ((unused)),
168 Dwarf_Addr start __attribute__ ((unused)),
169 void **buffer, size_t *buffer_available,
170 GElf_Off cost, GElf_Off worthwhile,
172 GElf_Off contiguous __attribute__ ((unused)),
173 void *arg, Elf **elfp)
/* Best case: the caller's buffer already covers the entire file image.  */
177 if (whole <= *buffer_available)
179 /* All there ever was, we already have on hand. */
181 if (core->map_address == NULL)
183 /* We already malloc'd the buffer. */
/* Hand the malloc'd buffer to libelf and let it own (and free) it.  */
184 *elfp = elf_memory (*buffer, whole);
185 if (unlikely (*elfp == NULL))
188 (*elfp)->flags |= ELF_F_MALLOCED;
/* Ownership transferred: tell the caller its buffer is consumed.  */
190 *buffer_available = 0;
194 /* We can use the image inside the core file directly. */
/* *buffer points into the mmap'd core, so its offset within the core
   file is just the pointer difference.  */
195 *elfp = elf_begin_rand (core, *buffer - core->map_address, whole, NULL);
197 *buffer_available = 0;
198 return *elfp != NULL;
201 /* We don't have the whole file.
202 Figure out if this is better than nothing. */
205 /* Caller doesn't think so. */
209 XXX would like to fall back to partial file via memory
210 when build id find_elf fails
211 also, link_map name may give file name from disk better than partial here
212 requires find_elf hook re-doing the magic to fall back if no file found
215 if (mod->build_id_len > 0)
216 /* There is a build ID that could help us find the whole file,
217 which might be more useful than what we have.
218 We'll just rely on that. */
/* With an mmap'd core, reading the partial image costs nothing extra.  */
221 if (core->map_address != NULL)
222 /* It's cheap to get, so get it. */
225 /* Only use it if there isn't too much to be read. */
226 return cost <= MAX_EAGER_COST;
/* Dwfl_Memory_Callback backed by a core file's program headers: satisfy
   a read of *BUFFER_AVAILABLE bytes at a virtual address by locating
   the PT_LOAD segment(s) covering it in the core image.
   NOTE(review): elided excerpt -- the parameter lines declaring `elf'
   (the core Elf, from ARG), `vaddr', and `minread', plus the phdr/ndx
   setup and several returns, are not visible here.  */
230 dwfl_elf_phdr_memory_callback (Dwfl *dwfl, int ndx,
231 void **buffer, size_t *buffer_available,
240 /* Called for cleanup. */
/* A malloc'd buffer (non-mmap case) is released here; either way the
   caller's buffer is marked empty.  */
241 if (elf->map_address == NULL)
244 *buffer_available = 0;
/* Segment addresses/offsets are rounded out to this alignment.  */
248 const GElf_Off align = dwfl->segment_align ?: 1;
/* Scan forward for the first PT_LOAD whose rounded-up end covers VADDR.  */
252 if (unlikely (gelf_getphdr (elf, ndx++, &phdr) == NULL))
254 while (phdr.p_type != PT_LOAD
255 || ((phdr.p_vaddr + phdr.p_memsz + align - 1) & -align) <= vaddr);
/* File offset within the core corresponding to VADDR.  */
257 GElf_Off start = vaddr - phdr.p_vaddr + phdr.p_offset;
/* Recompute END/END_VADDR (rounded up) from the current phdr.  */
261 inline void update_end ()
263 end = (phdr.p_offset + phdr.p_filesz + align - 1) & -align;
264 end_vaddr = (phdr.p_vaddr + phdr.p_memsz + align - 1) & -align;
269 /* Use following contiguous segments to get towards SIZE. */
/* Extend [START, END) across adjacent PT_LOADs until SIZE bytes are
   covered; stops at truncated segments or a file/vaddr gap.  */
270 inline bool more (size_t size)
272 while (end <= start || end - start < size)
274 if (phdr.p_filesz < phdr.p_memsz)
275 /* This segment is truncated, so no following one helps us. */
278 if (unlikely (gelf_getphdr (elf, ndx++, &phdr) == NULL))
281 if (phdr.p_type == PT_LOAD)
283 if (phdr.p_offset > end
284 || phdr.p_vaddr > end_vaddr)
285 /* It's discontiguous! */
294 /* We need at least this much. */
295 if (! more (minread))
298 /* See how much more we can get of what the caller wants. */
299 (void) more (*buffer_available);
301 /* If it's already on hand anyway, use as much as there is. */
302 if (elf->map_address != NULL)
303 (void) more (elf->maximum_size - start);
305 /* Make sure we don't look past the end of the actual file,
306 even if the headers tell us to. */
307 if (unlikely (end > elf->maximum_size))
308 end = elf->maximum_size;
310 /* If the file is too small, there is nothing at all to get. */
311 if (unlikely (start >= end))
/* Fast path: the core is mmap'd, serve straight out of the mapping.  */
314 if (elf->map_address != NULL)
316 void *contents = elf->map_address + elf->start_offset + start;
317 size_t size = end - start;
/* minread == 0 requests a NUL-terminated string rather than raw bytes.  */
319 if (minread == 0) /* String mode. */
321 const void *eos = memchr (contents, '\0', size);
322 if (unlikely (eos == NULL) || unlikely (eos == contents))
/* Trim to the string including its terminator.  */
324 size = eos + 1 - contents;
330 *buffer_available = size;
/* Caller supplied a buffer: copy as much as fits.  */
334 *buffer_available = MIN (size, *buffer_available);
335 memcpy (*buffer, contents, *buffer_available);
/* Slow path: no mapping, read via pread into the caller's buffer or a
   freshly malloc'd one.  */
340 void *into = *buffer;
/* NOTE(review): with minread == 0 this MIN/MAX clamp always yields 512;
   looks suspicious (MAX/MIN possibly swapped) -- verify against the
   unelided upstream source before changing.  */
343 *buffer_available = MIN (minread ?: 512,
344 MAX (4096, MIN (end - start,
345 *buffer_available)));
346 into = malloc (*buffer_available);
347 if (unlikely (into == NULL))
349 __libdwfl_seterrno (DWFL_E_NOMEM);
354 ssize_t nread = pread_retry (elf->fildes, into, *buffer_available, start);
/* Short read (or error, nread < 0) below the required minimum fails.  */
355 if (nread < (ssize_t) minread)
360 __libdwfl_seterrno (DWFL_E_ERRNO);
364 if (minread == 0) /* String mode. */
366 const void *eos = memchr (into, '\0', nread);
367 if (unlikely (eos == NULL) || unlikely (eos == into))
373 nread = eos + 1 - into;
378 *buffer_available = nread;
/* Report the modules of the core file ELF into DWFL: first each PT_LOAD
   as a segment, then modules sniffed from segment contents, then the
   dynamic linker's list reached via the NT_AUXV note (AT_PHDR ->
   PT_DYNAMIC -> DT_DEBUG).  Returns the module count, or -1 on error
   when nothing was found.
   NOTE(review): elided excerpt -- `phnum', `sniffed', the do-loop
   header, and several returns are on lines not visible here; only the
   mojibake `&notes_phdr' argument (was `¬es_phdr', an HTML-entity
   corruption of `&') is corrected below.  */
385 dwfl_core_file_report (Dwfl *dwfl, Elf *elf)
388 if (unlikely (elf_getphdrnum (elf, &phnum) != 0))
390 __libdwfl_seterrno (DWFL_E_LIBELF);
394 /* First report each PT_LOAD segment. */
395 GElf_Phdr notes_phdr;
/* Fixed: `&notes_phdr' had been garbled to `¬es_phdr' (&not; entity).  */
396 int ndx = dwfl_report_core_segments (dwfl, elf, phnum, &notes_phdr);
397 if (unlikely (ndx <= 0))
400 /* Now sniff segment contents for modules. */
405 int seg = dwfl_segment_report_module (dwfl, ndx, NULL,
406 &dwfl_elf_phdr_memory_callback, elf,
407 core_file_read_eagerly, elf);
408 if (unlikely (seg < 0))
418 while (ndx < (int) phnum);
420 /* Next, we should follow the chain from DT_DEBUG. */
422 const void *auxv = NULL;
423 size_t auxv_size = 0;
/* notes_phdr was filled in by dwfl_report_core_segments iff a PT_NOTE
   segment was seen (it is PT_NULL otherwise).  */
424 if (likely (notes_phdr.p_type == PT_NOTE))
426 /* PT_NOTE -> NT_AUXV -> AT_PHDR -> PT_DYNAMIC -> DT_DEBUG */
428 Elf_Data *notes = elf_getdata_rawchunk (elf,
432 if (likely (notes != NULL))
/* Scan the note segment for the core's NT_AUXV ("CORE" name) note.  */
438 while ((pos = gelf_getnote (notes, pos, &nhdr,
439 &name_pos, &desc_pos)) > 0)
440 if (nhdr.n_type == NT_AUXV
441 && nhdr.n_namesz == sizeof "CORE"
442 && !memcmp (notes->d_buf + name_pos, "CORE", sizeof "CORE"))
444 auxv = notes->d_buf + desc_pos;
445 auxv_size = nhdr.n_descsz;
451 /* Now we have NT_AUXV contents. From here on this processing could be
452 used for a live process with auxv read from /proc. */
454 int listed = dwfl_link_map_report (dwfl, auxv, auxv_size,
455 dwfl_elf_phdr_memory_callback, elf);
457 /* We return the number of modules we found if we found any.
458 If we found none, we return -1 instead of 0 if there was an
459 error rather than just nothing found. If link_map handling
460 failed, we still have the sniffed modules. */
461 return sniffed == 0 || listed > sniffed ? listed : sniffed;
463 INTDEF (dwfl_core_file_report)