/// section header (i.e. the symbol position as we are reading the first
/// symbol).
///
+ /// @param section the ksymtab section to consider.
+ ///
+ /// @param position_relative_relocations if true, then consider that
+ /// the section designated by @p section contains position-relative
+ /// relocated symbol addresses.
+ ///
+ /// @param symbol_offset if non-zero, the offset (expressed in number
+ /// of symbol-value-sized entries) to add to the start of the section
+ /// before reading.  This is useful to read the ksymtab with a slight
+ /// offset.
+ ///
/// @return the symbol resulting from the lookup of the symbol address we
/// got from reading the first entry of the ksymtab or null if no such entry
/// could be found.
elf_symbol_sptr
try_reading_first_ksymtab_entry(Elf_Scn* section,
- bool position_relative_relocations) const
+ bool position_relative_relocations,
+ int symbol_offset = 0) const
{
// this function does not support relocatable ksymtab entries (as for
// example in kernel modules). Hence assert here on not having any
else
symbol_value_size = architecture_word_size();
+ const int read_offset = (symbol_offset * symbol_value_size);
+ bytes += read_offset;
+
if (position_relative_relocations)
{
int32_t offset = 0;
is_big_endian, offset));
GElf_Shdr section_header;
gelf_getshdr(section, &section_header);
- symbol_address = offset + section_header.sh_addr;
+ // the actual symbol address is relative to its position. Since we do
+ // not know the position, we take the beginning of the section, add the
+ // read_offset that we might have and finally apply the offset we
+ // read from the section.
+ symbol_address = section_header.sh_addr + read_offset + offset;
}
else
ABG_ASSERT(read_int_from_array_of_bytes(bytes, symbol_value_size,
get_ksymtab_entry_size() const
{
if (ksymtab_entry_size_ == 0)
- // The entry size if 2 * symbol_value_size.
- ksymtab_entry_size_ = 2 * get_ksymtab_symbol_value_size();
+ {
+ const unsigned char symbol_size = get_ksymtab_symbol_value_size();
+ Elf_Scn* ksymtab = find_any_ksymtab_section();
+ if (ksymtab)
+ {
+ GElf_Shdr ksymtab_shdr;
+ gelf_getshdr(ksymtab, &ksymtab_shdr);
+
+ // ksymtab entries have the following layout
+ //
+ // struct {
+ // T symbol_address; // .symtab entry
+ // T name_address; // .strtab entry
+ // }
+ //
+ // with T being a suitable type to represent the absolute,
+ // relocatable or position relative value of the address. T's size
+ // is determined by get_ksymtab_symbol_value_size().
+ //
+ // Since Kernel v5.4, the entries have the following layout
+ //
+ // struct {
+ // T symbol_address; // .symtab entry
+ // T name_address; // .strtab entry
+ // T namespace; // .strtab entry
+ // }
+ //
+ // To determine the ksymtab entry size, find the next entry that
+ // refers to a valid .symtab entry. The offset to that one is what
+ // we are searching for.
+ for (unsigned entries = 2; entries <= 3; ++entries)
+ {
+ const unsigned candidate_size = entries * symbol_size;
+
+ // if there is exactly one entry, section size == entry size
+ // (this looks like an optimization, but in fact it prevents
+ // illegal reads if there is actually only one entry)
+ if (ksymtab_shdr.sh_size == candidate_size)
+ {
+ ksymtab_entry_size_ = candidate_size;
+ break;
+ }
+
+ // otherwise check whether the symbol following the candidate
+ // number of entries is a valid ELF symbol. For that we read
+ // the ksymtab with the given offset and if the symbol is
+ // valid, we found our entry size.
+ const ksymtab_format format = get_ksymtab_format();
+ if (try_reading_first_ksymtab_entry
+ (ksymtab, format == V4_19_KSYMTAB_FORMAT, entries))
+ {
+ ksymtab_entry_size_ = candidate_size;
+ break;
+ }
+ }
+ ABG_ASSERT(ksymtab_entry_size_ != 0);
+ }
+ }
return ksymtab_entry_size_;
}
bool
load_kernel_symbol_table(kernel_symbol_table_kind kind)
{
- size_t nb_entries = 0;
Elf_Scn *section = 0, *reloc_section = 0;
address_set_sptr linux_exported_fns_set, linux_exported_vars_set;
case KERNEL_SYMBOL_TABLE_KIND_KSYMTAB:
section = find_ksymtab_section();
reloc_section = find_ksymtab_reloc_section();
- nb_entries = get_nb_ksymtab_entries();
linux_exported_fns_set = create_or_get_linux_exported_fn_syms();
linux_exported_vars_set = create_or_get_linux_exported_var_syms();
break;
case KERNEL_SYMBOL_TABLE_KIND_KSYMTAB_GPL:
section = find_ksymtab_gpl_section();
reloc_section = find_ksymtab_gpl_reloc_section();
- nb_entries = get_nb_ksymtab_gpl_entries();
linux_exported_fns_set = create_or_get_linux_exported_gpl_fn_syms();
linux_exported_vars_set = create_or_get_linux_exported_gpl_var_syms();
break;
}
- if (!linux_exported_vars_set
- || !linux_exported_fns_set
- || !section
- || !nb_entries)
+ if (!linux_exported_vars_set || !linux_exported_fns_set || !section)
return false;
ksymtab_format format = get_ksymtab_format();
// apply those relocations and we're safe to read the relocation section to
// determine which exported symbols are in the ksymtab.
if (!reloc_section || format == PRE_V4_19_KSYMTAB_FORMAT)
- return populate_symbol_map_from_ksymtab(section, linux_exported_fns_set,
- linux_exported_vars_set,
- nb_entries);
+ {
+ size_t nb_entries = 0;
+ if (kind == KERNEL_SYMBOL_TABLE_KIND_KSYMTAB)
+ nb_entries = get_nb_ksymtab_entries();
+ else if (kind == KERNEL_SYMBOL_TABLE_KIND_KSYMTAB_GPL)
+ nb_entries = get_nb_ksymtab_gpl_entries();
+
+ if (!nb_entries)
+ return false;
+
+ return populate_symbol_map_from_ksymtab(
+ section, linux_exported_fns_set, linux_exported_vars_set,
+ nb_entries);
+ }
else
return populate_symbol_map_from_ksymtab_reloc(reloc_section,
linux_exported_fns_set,