// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>
void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;
	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;
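
	/*
	 * With module PLTs enabled, the allocation below may be retried
	 * from the 2 GB fallback window further down, so an initial
	 * failure is not fatal and should not warn.
	 */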
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
				NUMA_NO_NODE, __builtin_return_address(0));
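
	/*
	 * The fallback below relies on module PLTs: veneers lift the
	 * +/-128 MB direct-branch limit, so the module does not have to
	 * sit inside the default region near the kernel image.
	 */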
	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
	     (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	      !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
		/*
		 * KASAN without KASAN_VMALLOC can only deal with module
		 * allocations being served from the reserved module region,
		 * since the remainder of the vmalloc region is already
		 * backed by zero shadow pages, and punching holes into it
		 * is non-trivial. Since the module region is not randomized
		 * when KASAN is enabled without KASAN_VMALLOC, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL, 0, NUMA_NO_NODE,
				__builtin_return_address(0));
	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}

	/* Memory is intended to be executable, reset the pointer tag. */
	return kasan_reset_tag(p);
}
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};
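
/*
 * Worked example for RELOC_OP_PAGE (used by ADRP): with a place of
 * 0xffff800008001004 and a val of 0xffff800008203456, the result is
 * 0xffff800008203000 - 0xffff800008001000 = 0x202000, i.e. the
 * distance between the two 4 KiB pages.
 */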
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */
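
	/*
	 * E.g. a 32-bit place-relative value of exactly 2^31 falls in
	 * the psABI's unsigned range but exceeds S32_MAX, so it is
	 * rejected under this signed interpretation.
	 */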
	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;
	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}
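
	/*
	 * E.g. for sval = -2 with lsb = 0: imm becomes ~(-2) = 1, and
	 * MOVN materializes ~1 = -2, reproducing the value.
	 */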
	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;
	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);
	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
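
	/*
	 * E.g. with len = 19, sval is now 0 for values in [0, 2^18)
	 * and -1 for values in [-2^18, 0); any other result means the
	 * immediate did not fit in the 19-bit field.
	 */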
	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);
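
	/*
	 * The place is at an offset affected by Cortex-A53 erratum
	 * 843419, so the ADRP instruction cannot be used here as-is.
	 */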
	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);

	return 0;
}
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;
		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;
		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
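
		/*
		 * The LDSTxx cases above encode scaled offsets: e.g. an
		 * 8-byte access scales its 12-bit immediate by 8, so
		 * only bits [11:3] of the offset are encoded (lsb = 3,
		 * len = 9).
		 */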
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
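
			/*
			 * B and BL can only reach +/-128 MB from the
			 * branch; beyond that, bounce through a PLT
			 * veneer emitted in the module's PLT section.
			 */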
			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;
		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}
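
/*
 * Modules may load out of direct branch range of the ftrace entry code,
 * so the arch module linker script reserves a .text.ftrace_trampoline
 * section whose PLT slot is pointed at FTRACE_ADDR here.
 */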
static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;

	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);
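
	/*
	 * With dynamic shadow call stacks, the module's .init.eh_frame
	 * unwind data is used to locate the PACIASP/AUTIASP instructions
	 * so they can be patched into shadow call stack pushes and pops.
	 */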
	if (scs_is_dynamic()) {
		s = find_section(hdr, sechdrs, ".init.eh_frame");
		if (s)
			scs_patch((void *)s->sh_addr, s->sh_size);
	}

	return module_init_ftrace_plt(hdr, sechdrs, me);
}