RET = 3, /* tramp / site cond-tail-call */
};
-static void __static_call_transform(void *insn, enum insn_type type, void *func)
+static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
{
int size = CALL_INSN_SIZE;
const void *code;
if (memcmp(insn, code, size) == 0)
return;
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return text_poke_early(insn, code, size);
+
text_poke_bp(insn, code, size, NULL);
}
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+extern void __init static_call_init(void);
+
struct static_call_mod {
struct static_call_mod *next;
struct module *mod; /* for vmlinux, mod == NULL */
struct static_call_site *sites;
};
struct static_call_key {
void *func;
- struct static_call_mod *mods;
+ union {
+ /* bit 0: 0 = mods, 1 = sites */
+ unsigned long type;
+ struct static_call_mod *mods;
+ struct static_call_site *sites;
+ };
};
extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = _func, \
- .mods = NULL, \
+ .type = 1, \
}; \
ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#elif defined(CONFIG_HAVE_STATIC_CALL)
+static inline void static_call_init(void) { }
+
struct static_call_key {
void *func;
};
#else /* Generic implementation */
+static inline void static_call_init(void) { }
+
struct static_call_key {
void *func;
};
static_call_site_cmp, static_call_site_swap);
}
+static inline bool static_call_key_has_mods(struct static_call_key *key)
+{
+ return !(key->type & 1);
+}
+
+static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
+{
+ if (!static_call_key_has_mods(key))
+ return NULL;
+
+ return key->mods;
+}
+
+static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
+{
+ if (static_call_key_has_mods(key))
+ return NULL;
+
+ return (struct static_call_site *)(key->type & ~1);
+}
+
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
struct static_call_site *site, *stop;
- struct static_call_mod *site_mod;
+ struct static_call_mod *site_mod, first;
cpus_read_lock();
static_call_lock();
if (WARN_ON_ONCE(!static_call_initialized))
goto done;
- for (site_mod = key->mods; site_mod; site_mod = site_mod->next) {
+ first = (struct static_call_mod){
+ .next = static_call_key_next(key),
+ .mod = NULL,
+ .sites = static_call_key_sites(key),
+ };
+
+ for (site_mod = &first; site_mod; site_mod = site_mod->next) {
struct module *mod = site_mod->mod;
if (!site_mod->sites) {
/*
* This can happen if the static call key is defined in
* a module which doesn't use it.
+ *
+ * It also happens in the has_mods case, where the
+ * 'first' entry has no sites associated with it.
*/
continue;
}
if (key != prev_key) {
prev_key = key;
+ /*
+ * For vmlinux (!mod) avoid the allocation by storing
+ * the sites pointer in the key itself. Also see
+ * __static_call_update()'s @first.
+ *
+ * This allows architectures (eg. x86) to call
+ * static_call_init() before memory allocation works.
+ */
+ if (!mod) {
+ key->sites = site;
+ key->type |= 1;
+ goto do_transform;
+ }
+
site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
if (!site_mod)
return -ENOMEM;
+ /*
+ * When the key has a direct sites pointer, extract
+ * that into an explicit struct static_call_mod, so we
+ * can have a list of modules.
+ */
+ if (static_call_key_sites(key)) {
+ site_mod->mod = NULL;
+ site_mod->next = NULL;
+ site_mod->sites = static_call_key_sites(key);
+
+ key->mods = site_mod;
+
+ site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
+ if (!site_mod)
+ return -ENOMEM;
+ }
+
site_mod->mod = mod;
site_mod->sites = site;
- site_mod->next = key->mods;
+ site_mod->next = static_call_key_next(key);
key->mods = site_mod;
}
+do_transform:
arch_static_call_transform(site_addr, NULL, key->func,
static_call_is_tail(site));
}
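Two consequences of the branches above are worth seeing end to end: the first module that shares a key must lift the tagged vmlinux sites pointer into a real static_call_mod before prepending its own entry, and __static_call_update() then hides both encodings behind its on-stack 'first' entry. Below is a standalone sketch of that flow, again with local mock types (redefined so it compiles on its own) rather than kernel code:

/* Illustrative only: mock_* names are stand-ins for the kernel structures. */
#include <stdio.h>
#include <stdlib.h>

struct mock_site { int addr, key; };
struct mock_mod  { struct mock_mod *next; struct mock_site *sites; const char *name; };

struct mock_key {
	void *func;
	union {
		unsigned long type;		/* bit 0: 0 = mods, 1 = sites */
		struct mock_mod *mods;
		struct mock_site *sites;
	};
};

static int has_mods(struct mock_key *k) { return !(k->type & 1); }

static struct mock_mod *key_next(struct mock_key *k)
{
	return has_mods(k) ? k->mods : NULL;
}

static struct mock_site *key_sites(struct mock_key *k)
{
	return has_mods(k) ? NULL : (struct mock_site *)(k->type & ~1UL);
}

int main(void)
{
	static struct mock_site vmlinux_sites[4], module_sites[2];
	struct mock_mod *vml = calloc(1, sizeof(*vml));
	struct mock_mod *mod = calloc(1, sizeof(*mod));

	if (!vml || !mod)
		return 1;

	/* state after vmlinux init: sites live in the key itself, tagged, no allocation */
	struct mock_key key = { .sites = vmlinux_sites };
	key.type |= 1;

	/* first module: lift the tagged pointer into a real list node ... */
	vml->name  = "vmlinux";
	vml->sites = key_sites(&key);
	key.mods   = vml;			/* bit 0 now clear: list mode from here on */

	/* ... then prepend the module's own entry, as the init path above does */
	mod->name  = "some_module";
	mod->sites = module_sites;
	mod->next  = key_next(&key);
	key.mods   = mod;

	/* __static_call_update()-style walk: wrap the key in an on-stack 'first' */
	struct mock_mod first = { .next = key_next(&key), .sites = key_sites(&key) };
	for (struct mock_mod *m = &first; m; m = m->next) {
		if (!m->sites)		/* 'first' carries no sites once a mods list exists */
			continue;
		printf("would patch the sites of %s\n", m->name);
	}

	free(mod);
	free(vml);
	return 0;
}

In list mode 'first' is just an empty head that the !sites check skips; for a vmlinux-only key it is the one and only entry, carrying the sites pointer recovered from the tagged word.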
return __static_call_mod_text_reserved(start, end);
}
-static void __init static_call_init(void)
+void __init static_call_init(void)
{
int ret;