struct fbi_var_data *vars;
struct fbi_step *steps_source;
struct fbi_step *steps_dest = NULL;
- uint8_t i;
-
+ uint8_t i, n;
+ int ret = 0;
memcpy(dest, source, sizeof(*source));
vars_size = source->fbi_i.var_count * sizeof(*source->fbi_i.vars);
vars = kmalloc(vars_size, GFP_KERNEL);
+ if (vars == NULL)
+ return -ENOMEM;
+
memcpy(vars, source->fbi_i.vars, vars_size);
for (i = 0; i != source->fbi_i.var_count; i++) {
steps_dest = kmalloc(steps_size, GFP_KERNEL);
if (steps_dest == NULL) {
print_err("can not alloc data\n");
- return -ENOMEM;
+ n = i;
+ ret = -ENOMEM;
+ goto err;
}
memcpy(steps_dest, steps_source, steps_size);
dest->fbi_i.vars = vars;
- return 0;
+ return ret;
+err:
+ for (i = 0; i < n; i++)
+ kfree(vars[i].steps);
+ kfree(vars);
+ return ret;
}
/* Register */
#include <linux/list.h>
#include <linux/hash.h>
+
#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
+#include <linux/stop_machine.h>
#include <ksyms/ksyms.h>
#include <master/swap_initializer.h>
{
memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
-#ifdef CONFIG_ARM
- p->safe_arm = old_p->safe_arm;
- p->safe_thumb = old_p->safe_thumb;
-#endif
}
/*
(p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
/* Only probe on the hash list */
swap_arch_disarm_kprobe(p);
+
+ /* FIXME: move sync out from atomic context */
+ if (!in_atomic())
+ synchronize_sched();
+
hlist_del_rcu(&old_p->hlist);
remove_kprobe(old_p);
/* TODO: test - remove retprobe after func entry but before its exit */
ri = get_free_rp_inst(rp);
if (ri != NULL) {
+ int skip = 0;
+
ri->rp = rp;
ri->task = current;
if (rp->entry_handler)
- rp->entry_handler(ri, regs);
-
- swap_arch_prepare_kretprobe(ri, regs);
-
- add_rp_inst(ri);
+ skip = rp->entry_handler(ri, regs);
+
+ if (skip) {
+ add_rp_inst(ri);
+ recycle_rp_inst(ri);
+ } else {
+ swap_arch_prepare_kretprobe(ri, regs);
+ add_rp_inst(ri);
+ }
} else {
++rp->nmissed;
}
}
}
+
+/*
+ * Arguments for __swap_unregister_kretprobes_top(), packed into one
+ * struct so they can travel through stop_machine()'s single void *data.
+ */
+struct unreg_krp_args {
+	struct kretprobe **rps;	/* array of kretprobes to unregister */
+	size_t size;		/* number of entries in rps */
+	int rp_disarm;		/* non-zero: also disarm pending instances */
+};
+
+/*
+ * Unregister the kprobes backing an array of kretprobes, walking the
+ * array from the last element down to the first.  Runs either directly
+ * or as a stop_machine() callback (see swap_unregister_kretprobes_top),
+ * so it must not sleep.  Always returns 0 (stop_machine contract).
+ */
+static int __swap_unregister_kretprobes_top(void *data)
+{
+	struct unreg_krp_args *args = data;
+	struct kretprobe **rps = args->rps;
+	size_t size = args->size;
+	int rp_disarm = args->rp_disarm;
+	unsigned long flags;
+	/* SIZE_MAX sentinel: the index wraps to this after element 0 */
+	const size_t end = ((size_t) 0) - 1;
+
+	for (--size; size != end; --size) {
+		swap_unregister_kprobe(&rps[size]->kp);
+		if (rp_disarm) {
+			spin_lock_irqsave(&kretprobe_lock, flags);
+			swap_disarm_krp(rps[size]);
+			spin_unlock_irqrestore(&kretprobe_lock, flags);
+		}
+	}
+
+	return 0;
+}
+
/**
* @brief Kretprobes unregister top. Unregisters kprobes.
*
void swap_unregister_kretprobes_top(struct kretprobe **rps, size_t size,
int rp_disarm)
{
- unsigned long flags;
- const size_t end = ((size_t) 0) - 1;
-
- spin_lock_irqsave(&kretprobe_lock, flags);
- for (--size; size != end; --size) {
- swap_unregister_kprobe(&rps[size]->kp);
- if (rp_disarm)
- swap_disarm_krp(rps[size]);
+ struct unreg_krp_args args = {
+ .rps = rps,
+ .size = size,
+ .rp_disarm = rp_disarm,
+ };
+
+ if (rp_disarm) {
+ int ret;
+
+ ret = stop_machine(__swap_unregister_kretprobes_top,
+ &args, NULL);
+ if (ret)
+ pr_err("%s failed (%d)\n", __func__, ret);
+ } else {
+ __swap_unregister_kretprobes_top(&args);
}
- spin_unlock_irqrestore(&kretprobe_lock, flags);
}
EXPORT_SYMBOL_GPL(swap_unregister_kretprobes_top);
return retval;
}
+/*
+ * Recycle every kretprobe instance still attached to @task.  Called from
+ * the probe on __put_task_struct() (see put_task_handler) so that no
+ * instance is left pointing at a task that is being destroyed.
+ * Takes kretprobe_lock with IRQs disabled.
+ */
+static void krp_inst_flush(struct task_struct *task)
+{
+	unsigned long flags;
+	struct kretprobe_instance *ri;
+	struct hlist_node *tmp;
+	struct hlist_head *head;
+	DECLARE_NODE_PTR_FOR_HLIST(node);
+
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	head = kretprobe_inst_table_head(task);
+	/* _safe variant: recycle_rp_inst() unlinks ri from this list */
+	swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task == task) {
+			printk("task[%u %u %s]: flush krp_inst, ret_addr=%p\n",
+			       task->tgid, task->pid, task->comm,
+			       ri->ret_addr);
+			recycle_rp_inst(ri);
+		}
+	}
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+}
+
+/*
+ * Pre-handler of the probe placed on __put_task_struct(): flushes any
+ * kretprobe instances still bound to the dying task.
+ * Assumes arg0 of __put_task_struct is the task being freed — TODO
+ * confirm against the probed kernel version.
+ */
+static int put_task_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct task_struct *t = (struct task_struct *)swap_get_karg(regs, 0);
+
+	/* task has died */
+	krp_inst_flush(t);
+
+	return 0;
+}
+
+/* Probe on __put_task_struct; .addr is resolved via swap_ksyms() at init */
+static struct kprobe put_task_kp = {
+	.pre_handler = put_task_handler,
+};
+
static int init_module_deps(void)
{
int ret;
if (module_alloc == NULL)
goto not_found;
+ sym = "__put_task_struct";
+ put_task_kp.addr = (void *)swap_ksyms(sym);
+ if (put_task_kp.addr == NULL)
+ goto not_found;
+
ret = init_module_deps();
if (ret)
return ret;
static int init_kprobes(void)
{
+ int ret;
+
init_sm();
atomic_set(&kprobe_count, 0);
- return swap_arch_init_kprobes();
+ ret = swap_arch_init_kprobes();
+ if (ret)
+ return ret;
+
+ ret = swap_register_kprobe(&put_task_kp);
+ if (ret) {
+ swap_arch_exit_kprobes();
+ return ret;
+ }
+
+ return 0;
}
static void exit_kprobes(void)
{
+ swap_unregister_kprobe(&put_task_kp);
swap_arch_exit_kprobes();
exit_sm();
}
* NULL if original function should be called.
* Not supported for X86, not tested for MIPS. */
kprobe_opcode_t *ss_addr[NR_CPUS];
-#ifdef CONFIG_ARM
- /** Safe/unsafe to use probe on ARM.*/
- unsigned safe_arm:1;
- /** Safe/unsafe to use probe on Thumb.*/
- unsigned safe_thumb:1;
-#endif
};
/**
void swap_unregister_kretprobes_bottom(struct kretprobe **rps, size_t size);
-int swap_disarm_urp_inst_for_task(struct task_struct *parent,
- struct task_struct *task);
-
int trampoline_probe_handler (struct kprobe *p, struct pt_regs *regs);
int,
struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access)
-IMP_MOD_DEP_WRAPPER(handle_mm_fault, mm, vma, address, write_access)
-#endif
-#else
+{
+ if (in_atomic())
+ return VM_FAULT_ERROR | VM_FAULT_OOM;
+
+ IMP_MOD_DEP_WRAPPER(handle_mm_fault, mm, vma, address, write_access)
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18) */
+#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30) */
DECLARE_MOD_DEP_WRAPPER(swap_handle_mm_fault,
int,
struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
-IMP_MOD_DEP_WRAPPER(handle_mm_fault, mm, vma, address, flags)
-#endif
+{
+ if (in_atomic())
+ return VM_FAULT_ERROR | VM_FAULT_OOM;
+
+ IMP_MOD_DEP_WRAPPER(handle_mm_fault, mm, vma, address, flags)
+}
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30) */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
DECLARE_MOD_DEP_WRAPPER(swap_get_gate_vma,
{
#ifdef __HAVE_ARCH_GATE_AREA
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
- struct mm_struct *mm = task->mm;
+ struct mm_struct *mm;
+
+ if (task == NULL)
+ return 0;
+
+ mm = task->mm;
IMP_MOD_DEP_WRAPPER(in_gate_area, mm, addr)
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
IMP_MOD_DEP_WRAPPER(in_gate_area, task, addr)
struct chunk chunk;
};
-static void chunk_init(struct chunk *chunk,
- void *data,
- size_t size,
- size_t size_block)
+static int chunk_init(struct chunk *chunk, void *data,
+ size_t size, size_t size_block)
{
unsigned long i;
unsigned long *p;
chunk->index = kmalloc(sizeof(*chunk->index)*chunk->count_available,
GFP_ATOMIC);
+ if (chunk->index == NULL) {
+ pr_err("%s: failed to allocate memory\n", __func__);
+ return -ENOMEM;
+ }
+
p = chunk->index;
for (i = 0; i != chunk->count_available; ++p)
*p = ++i;
+
+ return 0;
}
static void chunk_uninit(struct chunk *chunk)
static struct fixed_alloc *create_fixed_alloc(struct slot_manager *sm)
{
+ int ret;
void *data;
struct fixed_alloc *fa;
return NULL;
data = sm->alloc(sm);
- if (data == NULL) {
- kfree(fa);
- return NULL;
- }
+ if (data == NULL)
+ goto free_fa;
- chunk_init(&fa->chunk, data,
- PAGE_SIZE/sizeof(unsigned long), sm->slot_size);
+ ret = chunk_init(&fa->chunk, data,
+ PAGE_SIZE / sizeof(unsigned long), sm->slot_size);
+ if (ret)
+ goto free_sm;
return fa;
+
+free_sm:
+ sm->free(sm, data);
+free_fa:
+ kfree(fa);
+ return NULL;
}
static void free_fixed_alloc(struct slot_manager *sm, struct fixed_alloc *fa)
static char *fops_fpath(struct file *file, char *buf, int buflen)
{
- char *filename = d_path(&file->f_path, buf, buflen);
+ char *filename;
+
+ path_get(&file->f_path);
+ filename = d_path(&file->f_path, buf, buflen);
+ path_put(&file->f_path);
if (IS_ERR_OR_NULL(filename)) {
printk(FOPS_PREFIX "d_path FAILED: %ld\n", PTR_ERR(filename));
if (IS_ERR_OR_NULL(filename)) {
printk(FOPS_PREFIX "dentry_path_raw FAILED: %ld\n",
PTR_ERR(filename));
- strcpy(buf, NA);
+ strncpy(buf, NA, buflen);
filename = buf;
}
--cnt;
- rpp = kmalloc(GFP_KERNEL, sizeof(&(((struct ks_probe *) 0)->rp)) * cnt);
+ rpp = kmalloc(GFP_KERNEL, sizeof(*rpp) * cnt);
if (rpp == NULL) {
for (; cnt != end; --cnt) {
ret = unregister_syscall(id_p[cnt]);
n = strncpy_from_user(p, ofile, size);
if (n < 0) {
printk(KSF_PREFIX "cannot copy ofile\n");
- swap_msg_put(m);
+ goto put_msg;
}
swap_msg_flush(m, ret + n + 1);
{
struct probe *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL)
+ return NULL;
+
p->p.jp.kp.addr = p->p.rp.kp.addr = (void *)addr;
p->p.jp.pre_entry = pre_handler;
p->p.jp.entry = jp_handler;
print_parse_debug("MSG_START. size=%d\n", size);
ret = msg_start(&mb);
break;
- case MSG_STOP: {
- struct cpumask mask;
-
+ case MSG_STOP:
print_parse_debug("MSG_STOP. size=%d\n", size);
-
- swap_disable_nonboot_cpus_lock(&mask);
ret = msg_stop(&mb);
- swap_enable_nonboot_cpus_unlock(&mask);
-
break;
- }
case MSG_CONFIG:
print_parse_debug("MSG_CONFIG. size=%d\n", size);
ret = msg_config(&mb);
goto cp2buf;
}
+ path_get(&file->f_path);
filename = d_path(&file->f_path, tmp_buf, TMP_BUF_LEN);
+ path_put(&file->f_path);
+
if (IS_ERR_OR_NULL(filename)) {
filename = NA;
goto cp2buf;
preload_probe.o \
preload_control.o \
preload_threads.o \
- preload_patcher.o \
preload_pd.o
}
+/*
+ * Called only from handlers.  The call site reached a handler, so it is
+ * instrumented; classify it as internal or external accordingly.
+ */
+enum preload_call_type preload_control_call_type_always_inst(void *caller)
+{
+	return __is_instrumented(caller) ? INTERNAL_CALL : EXTERNAL_CALL;
+}
enum preload_call_type preload_control_call_type(struct us_ip *ip, void *caller)
{
int i;
unsigned int ret = 0;
+ if (target_binaries_cnt == 0)
+ return 0;
+
__target_binaries_lock();
*filenames_p = kmalloc(sizeof(**filenames_p) * target_binaries_cnt,
int preload_control_init(void);
void preload_control_exit(void);
+enum preload_call_type preload_control_call_type_always_inst(void *caller);
enum preload_call_type preload_control_call_type(struct us_ip *ip, void *caller);
int preload_control_add_instrumented_binary(char *filename);
int preload_control_clean_instrumented_bins(void);
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/limits.h>
#include <asm/uaccess.h>
#include <master/swap_debugfs.h>
#include "preload_debugfs.h"
#include "preload_module.h"
#include "preload_control.h"
-#include "preload_patcher.h"
#include "preload_storage.h"
static const char PRELOAD_FOLDER[] = "preload";
};
static struct dentry *preload_root;
-static struct loader_info __loader_info = {
- .path = NULL,
- .offset = 0,
- .dentry = NULL
-};
+static struct loader_info __loader_info;
static unsigned long r_debug_offset = 0;
-static DEFINE_MUTEX(__dentry_lock);
+static DEFINE_SPINLOCK(__dentry_lock);
static inline void dentry_lock(void)
{
- mutex_lock(&__dentry_lock);
+ spin_lock(&__dentry_lock);
}
static inline void dentry_unlock(void)
{
- mutex_unlock(&__dentry_lock);
+ spin_unlock(&__dentry_lock);
}
{
if (__loader_info.path != NULL)
kfree(__loader_info.path);
+ __loader_info.path = NULL;
dentry_lock();
if (__loader_info.dentry != NULL)
put_dentry(__loader_info.dentry);
+
+ __loader_info.dentry = NULL;
+ __loader_info.offset = 0;
+
dentry_unlock();
}
*/
static ssize_t loader_path_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+ size_t len, loff_t *ppos)
{
ssize_t ret;
char *path;
path = kmalloc(len, GFP_KERNEL);
if (path == NULL) {
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
if (copy_from_user(path, buf, len)) {
ret = -EINVAL;
- goto out;
+ goto err;
}
path[len - 1] = '\0';
set_loader_file(path);
ret = len;
-out:
+ return ret;
+err:
+ kfree(path);
return ret;
}
#include <linux/types.h>
#include <linux/uaccess.h>
#include <kprobe/swap_kprobes.h>
+#include <kprobe/swap_kprobes_deps.h>
#include <us_manager/us_manager_common.h>
#include <us_manager/sspt/sspt_page.h>
#include <us_manager/sspt/sspt_file.h>
#include "preload_storage.h"
#include "preload_control.h"
#include "preload_threads.h"
-#include "preload_patcher.h"
#include "preload_pd.h"
#define page_to_proc(page) ((page)->file->proc)
unsigned long origin;
};
-static int get_put_counter;
+static atomic_t dentry_balance = ATOMIC_INIT(0);
enum preload_status_t {
SWAP_PRELOAD_NOT_READY = 0,
static inline struct process_data *__get_process_data(struct uretprobe *rp)
{
- struct process_data *pd;
- struct us_ip *ip = to_us_ip(rp);
-
- pd = ip_to_proc(ip)->private_data;
+ struct us_ip *ip = to_us_ip(rp);
+ struct sspt_proc *proc = ip_to_proc(ip);
- return pd;
+ return preload_pd_get(proc);
}
static struct dentry *__get_dentry(struct dentry *dentry)
{
- get_put_counter++;
+ atomic_inc(&dentry_balance);
return dget(dentry);
}
void put_dentry(struct dentry *dentry)
{
- get_put_counter--;
+ atomic_dec(&dentry_balance);
dput(dentry);
}
return NULL;
}
+/*
+ * Find the executable mapping of libpthread in @task's address space.
+ * Returns the matching vma, or NULL when libpthread info is unavailable
+ * or no such mapping exists.
+ * NOTE(review): walks task->mm->mmap without taking mmap_sem — assumes
+ * callers hold it or run where the map cannot change; confirm at call
+ * sites.
+ */
+static struct vm_area_struct *__get_libpthread_vma(struct task_struct *task)
+{
+	struct vm_area_struct *vma = NULL;
+	struct bin_info *libpthread_info;
+
+	libpthread_info = preload_storage_get_libpthread_info();
+
+	if (!libpthread_info) {
+		printk(PRELOAD_PREFIX "Cannot get libpthread info [%u %u %s]!\n",
+		       task->tgid, task->pid, task->comm);
+		return NULL;
+	}
+
+	for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
+		/* executable, file-backed mapping of the libpthread dentry */
+		if (vma->vm_file && vma->vm_flags & VM_EXEC
+		    && vma->vm_file->f_dentry == libpthread_info->dentry) {
+			preload_storage_put_libpthread_info(libpthread_info);
+			return vma;
+		}
+	}
+
+	preload_storage_put_libpthread_info(libpthread_info);
+	return NULL;
+}
+
+/*
+ * Find the executable mapping of libsmack in @task's address space.
+ * Returns the matching vma, or NULL when libsmack info is unavailable
+ * or no such mapping exists.
+ * NOTE(review): same unsynchronized mm->mmap walk as
+ * __get_libpthread_vma() — verify locking at call sites.
+ */
+static struct vm_area_struct *__get_libsmack_vma(struct task_struct *task)
+{
+	struct vm_area_struct *vma = NULL;
+	struct bin_info *libsmack_info;
+
+	libsmack_info = preload_storage_get_libsmack_info();
+
+	if (!libsmack_info) {
+		printk(PRELOAD_PREFIX "Cannot get libsmack info [%u %u %s]!\n",
+		       task->tgid, task->pid, task->comm);
+		return NULL;
+	}
+
+	for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
+		/* executable, file-backed mapping of the libsmack dentry */
+		if (vma->vm_file && vma->vm_flags & VM_EXEC
+		    && vma->vm_file->f_dentry == libsmack_info->dentry) {
+			preload_storage_put_libsmack_info(libsmack_info);
+			return vma;
+		}
+	}
+
+	preload_storage_put_libsmack_info(libsmack_info);
+	return NULL;
+}
+
static inline struct vm_area_struct *__get_vma_by_addr(struct task_struct *task,
unsigned long caller_addr)
{
static inline int __msg_sanitization(char *user_msg, size_t len,
char *call_type_p, char *caller_p)
{
- int ret;
-
- ret = access_ok(VERIFY_READ, (unsigned long)user_msg, (unsigned long)len);
- if (ret == 0)
- return -EINVAL;
-
if ((call_type_p < user_msg) || (call_type_p > user_msg + len) ||
(caller_p < user_msg) || (caller_p > user_msg + len))
return -EINVAL;
static bool __is_proc_mmap_mappable(struct task_struct *task)
{
struct vm_area_struct *linker_vma = __get_linker_vma(task);
+ struct sspt_proc *proc;
unsigned long r_debug_addr;
unsigned int state;
- int ret;
+ enum { r_state_offset = sizeof(int) + sizeof(void *) + sizeof(long) };
if (linker_vma == NULL)
return false;
if (r_debug_addr == 0)
return false;
- ret = preload_patcher_get_ui((void *)r_debug_addr + sizeof(int) +
- sizeof(void *) + sizeof(unsigned long),
- &state, task);
- if (ret != sizeof(state))
+ r_debug_addr += r_state_offset;
+ proc = sspt_proc_get_by_task(task);
+ if (proc)
+ proc->r_state_addr = r_debug_addr;
+
+ if (get_user(state, (unsigned long *)r_debug_addr))
return false;
- return ( state == 0 ? true : false );
+ return !state;
}
static bool __not_system_caller(struct task_struct *task,
{
struct vm_area_struct *linker_vma = __get_linker_vma(task);
struct vm_area_struct *libc_vma = __get_libc_vma(task);
-
- if (linker_vma == NULL || libc_vma == NULL || caller == NULL ||
- caller == linker_vma || caller == libc_vma)
+ struct vm_area_struct *libpthread_vma = __get_libpthread_vma(task);
+ struct vm_area_struct *libsmack_vma = __get_libsmack_vma(task);
+
+ if (linker_vma == NULL ||
+ libc_vma == NULL ||
+ libpthread_vma == NULL ||
+ libsmack_vma == NULL ||
+ caller == NULL ||
+ caller == linker_vma ||
+ caller == libc_vma ||
+ caller == libpthread_vma ||
+ caller == libsmack_vma)
return false;
return true;
struct pt_regs *regs)
{
unsigned long caller_addr = get_regs_ret_func(regs);
- struct vm_area_struct *cvma = __get_vma_by_addr(current, caller_addr);
+ struct vm_area_struct *cvma = __get_vma_by_addr(current, caller_addr);
if (!__is_proc_mmap_mappable(task) ||
!__not_system_caller(task, cvma))
static inline void __write_data_to_msg(char *msg, size_t len,
unsigned long call_type_off,
- unsigned long caller_off)
+ unsigned long caller_off,
+ unsigned long caller_addr)
{
unsigned char call_type = 0;
unsigned long caller = 0;
int ret;
- ret = preload_threads_get_caller(current, &caller);
- if (ret != 0) {
- caller = 0xbadbeef;
- printk(PRELOAD_PREFIX "Error! Cannot get caller address for %d/%d\n",
- current->tgid, current->pid);
- }
+ if (caller_addr != 0) {
+ caller = caller_addr;
+ call_type = preload_control_call_type_always_inst((void *)caller);
+ } else {
+ ret = preload_threads_get_caller(current, &caller);
+ if (ret != 0) {
+ caller = 0xbadbeef;
+ printk(PRELOAD_PREFIX "Error! Cannot get caller address for %d/%d\n",
+ current->tgid, current->pid);
+ }
- ret = preload_threads_get_call_type(current, &call_type);
- if (ret != 0) {
- call_type = 0xff;
- printk(PRELOAD_PREFIX "Error! Cannot get call type for %d/%d\n",
- current->tgid, current->pid);
+ ret = preload_threads_get_call_type(current, &call_type);
+ if (ret != 0) {
+ call_type = 0xff;
+ printk(PRELOAD_PREFIX "Error! Cannot get call type for %d/%d\n",
+ current->tgid, current->pid);
+ }
}
/* Using the same types as in the library. */
{
struct mmap_priv *priv = (struct mmap_priv *)ri->data;
struct task_struct *task = current->group_leader;
+ struct process_data *pd;
struct sspt_proc *proc;
unsigned long vaddr;
if (!proc)
return 0;
+ pd = preload_pd_get(proc);
+ if (pd == NULL) {
+ printk(PRELOAD_PREFIX "%d: No process data! Current %d %s\n",
+ __LINE__, current->tgid, current->comm);
+ return 0;
+ }
+
switch (priv->type) {
case MMAP_LOADER:
- preload_pd_set_loader_base(proc->private_data, vaddr);
+ preload_pd_set_loader_base(pd, vaddr);
break;
case MMAP_HANDLERS:
- preload_pd_set_handlers_base(proc->private_data, vaddr);
+ preload_pd_set_handlers_base(pd, vaddr);
break;
case MMAP_SKIP:
default:
unsigned long flags = get_preload_flags(current);
unsigned long offset = ip->desc->info.pl_i.handler;
unsigned long vaddr = 0;
+ unsigned long base;
char __user *path = NULL;
if ((flags & HANDLER_RUNNING) ||
if (!__should_we_preload_handlers(current, regs))
goto out_set_origin;
+ base = preload_pd_get_loader_base(pd);
+ if (base == 0)
+ break; /* loader isn't mapped */
+
/* jump to loader code if ready */
- vaddr = preload_pd_get_loader_base(pd) + preload_debugfs_get_loader_offset();
+ vaddr = base + preload_debugfs_get_loader_offset();
if (vaddr) {
/* save original regs state */
__save_uregs(ri, regs);
/* handlers have not yet been loaded... just ignore */
break;
case LOADED:
+ base = preload_pd_get_handlers_base(pd);
+ if (base == 0)
+ break; /* handlers isn't mapped */
+
/* jump to preloaded handler */
- vaddr = preload_pd_get_handlers_base(pd) + offset;
+ vaddr = base + offset;
if (vaddr) {
unsigned long disable_addr;
unsigned long caddr = get_regs_ret_func(regs);
unsigned long caller_offset;
unsigned long call_type_offset;
unsigned long caller_addr;
- bool drop;
int ret;
+ /* FIXME: swap_get_uarg uses get_user(), it might sleep */
user_buf = (char *)swap_get_uarg(regs, 0);
len = swap_get_uarg(regs, 1);
call_type_p = (char *)swap_get_uarg(regs, 2);
return 0;
}
- ret = preload_threads_get_drop(current, &drop);
- if (ret != 0 || drop)
+ ret = preload_threads_get_drop(current);
+ if (ret > 0)
return 0;
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kmalloc(len, GFP_ATOMIC);
if (buf == NULL) {
printk(PRELOAD_PREFIX "No mem for buffer! Size = %d\n", len);
return 0;
}
- if (copy_from_user(buf, user_buf, len)) {
+ ret = read_proc_vm_atomic(current, (unsigned long)user_buf, buf, len);
+ if (ret < 0) {
printk(PRELOAD_PREFIX "Cannot copy data from userspace! Size = %d"
- " ptr 0x%lx\n", len, (unsigned long)user_buf);
+ " ptr 0x%lx ret %d\n", len, (unsigned long)user_buf, ret);
goto write_msg_fail;
}
call_type_offset = (unsigned long)(call_type_p - user_buf);
caller_offset = (unsigned long)(caller_p - user_buf);
- __write_data_to_msg(buf, len, call_type_offset, caller_offset);
-
- /* FIXME refactor this hack for opengl tizen probes */
- if (caller_addr)
- *(uintptr_t *)(buf + caller_offset) = (uintptr_t)caller_addr;
+ __write_data_to_msg(buf, len, call_type_offset, caller_offset, caller_addr);
ret = swap_msg_raw(buf, len);
if (ret != len)
int preload_module_uprobe_init(struct us_ip *ip)
{
struct uretprobe *rp = &ip->retprobe;
- struct sspt_proc *proc = page_to_proc(ip->page);
- int ret;
-
- if (proc->private_data == NULL) {
- ret = preload_pd_create_pd(&(proc->private_data), proc->task);
- if (ret != 0)
- return ret;
- }
rp->entry_handler = preload_us_entry;
rp->handler = preload_us_ret;
* to dlopen */
rp->data_size = sizeof(struct us_priv);
- preload_pd_inc_refs(proc->private_data);
-
return 0;
}
void preload_module_uprobe_exit(struct us_ip *ip)
{
- struct sspt_proc *proc = ip_to_proc(ip);
-
- preload_pd_dec_refs(proc->private_data);
}
int preload_set(void)
static void preload_module_exit(void)
{
+ int balance;
+
us_manager_unreg_cb(__preload_cbs_start_h);
us_manager_unreg_cb(__preload_cbs_stop_h);
unregister_preload_probes();
preload_storage_exit();
preload_debugfs_exit();
- WARN(get_put_counter, "Bad GET/PUT balance: %d\n", get_put_counter);
+ balance = atomic_read(&dentry_balance);
+ atomic_set(&dentry_balance, 0);
+
+ WARN(balance, "Bad GET/PUT dentry balance: %d\n", balance);
}
SWAP_LIGHT_INIT_MODULE(NULL, preload_module_init, preload_module_exit,
+++ /dev/null
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm_types.h>
-#include <linux/mm.h>
-
-#include <kprobe/swap_kprobes_deps.h>
-
-#include "preload_patcher.h"
-#include "preload_debugfs.h"
-#include "preload_storage.h"
-
-
-static inline bool check_vma(struct vm_area_struct *vma, struct dentry *dentry)
-{
- struct file *file = vma->vm_file;
-
- return (file && (vma->vm_flags & VM_EXEC) && (file->f_dentry == dentry));
-}
-
-
-static inline int __patch_proc_mem(struct task_struct *task, unsigned long addr,
- void *buf, int size)
-{
- return write_proc_vm_atomic(task, addr, buf, size);
-}
-
-static inline int __read_proc_mem(struct task_struct *task, unsigned long addr,
- void *value, size_t value_size)
-{
- return read_proc_vm_atomic(task, addr, value, value_size);
-}
-
-
-
-
-int preload_patcher_patch_proc(void *addr, unsigned long val,
- struct task_struct *task)
-{
- return __patch_proc_mem(task, (unsigned long)addr, &val, sizeof(val));
-}
-
-int preload_patcher_write_string(void *addr, char *string, size_t len,
- struct task_struct *task)
-{
- return __patch_proc_mem(task, (unsigned long)addr, string, len);
-}
-
-int preload_patcher_get_ul(void *addr, unsigned long *val,
- struct task_struct *task)
-{
- return __read_proc_mem(task, (unsigned long)addr, val, sizeof(*val));
-}
-
-int preload_patcher_get_ui(void *addr, unsigned int *val,
- struct task_struct *task)
-{
- return __read_proc_mem(task, (unsigned long)addr, val, sizeof(*val));
-}
-
-int preload_patcher_null_mem(void *addr, int size, struct task_struct *task)
-{
- char *buf;
- int ret;
-
- buf = kmalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- memset(buf, 0, size);
-
- ret = __patch_proc_mem(task, (unsigned long)addr, buf, size);
-
- kfree(buf);
-
- return ret;
-}
+++ /dev/null
-#ifndef __PRELOAD_PATCHER_H__
-#define __PRELOAD_PATCHER_H__
-
-struct task_struct;
-
-int preload_patcher_patch_proc(void *addr, unsigned long val,
- struct task_struct *task);
-int preload_patcher_write_string(void *addr, char *string, size_t len,
- struct task_struct *task);
-int preload_patcher_get_ul(void *addr, unsigned long *val,
- struct task_struct *task);
-int preload_patcher_null_mem(void *addr, int size, struct task_struct *task);
-int preload_patcher_get_ui(void *addr, unsigned int *val,
- struct task_struct *task);
-
-
-
-#endif /* __PRELOAD_PATCHER_H__ */
#include <linux/mman.h>
#include <linux/hardirq.h>
#include <us_manager/us_manager_common.h>
+#include <us_manager/sspt/sspt_proc.h>
#include "preload_pd.h"
#include "preload_threads.h"
#include "preload_debugfs.h"
#include "preload_storage.h"
-#include "preload_patcher.h"
#include "preload.h"
struct process_data {
- char is_mapped;
enum preload_state_t state;
unsigned long loader_base;
unsigned long handlers_base;
-static unsigned long __find_dentry_base(struct mm_struct *mm,
- struct dentry *dentry)
-{
- struct vm_area_struct *vma;
-
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (check_vma(vma, dentry))
- return vma->vm_start;
- }
-
- return 0;
-}
-
-static unsigned long find_dentry_base(struct task_struct *task,
- struct dentry *dentry)
-{
- struct mm_struct *mm = task->mm;
- unsigned long addr;
-
-#ifdef CONFIG_ARM
- down_read(&mm->mmap_sem);
-#endif /* CONFIG_ARM */
- addr = __find_dentry_base(mm, dentry);
-#ifdef CONFIG_ARM
- up_read(&mm->mmap_sem);
-#endif /* CONFIG_ARM */
-
- return addr;
-}
-
static int __pd_create_on_demand(void)
{
if (handlers_info == NULL) {
unsigned long preload_pd_get_loader_base(struct process_data *pd)
{
if (pd == NULL)
- return ERROR;
+ return 0;
return __get_loader_base(pd);
}
void preload_pd_set_loader_base(struct process_data *pd, unsigned long vaddr)
{
- if (pd == NULL) {
- printk(PRELOAD_PREFIX "%d: No process data! Current %d %s\n", __LINE__,
- current->tgid, current->comm);
- return;
- }
-
__set_loader_base(pd, vaddr);
}
void preload_pd_set_handlers_base(struct process_data *pd, unsigned long vaddr)
{
- if (pd == NULL) {
- printk(PRELOAD_PREFIX "%d: No process data! Current %d %s\n", __LINE__,
- current->tgid, current->comm);
- return;
- }
-
__set_handlers_base(pd, vaddr);
}
char __user *preload_pd_get_path(struct process_data *pd)
{
- /* This function should be called only for current */
-
- struct task_struct *task = current;
- unsigned long page = __get_data_page(pd);
- int ret;
-
- if (pd == NULL || page == 0)
- return NULL;
-
- if (pd->is_mapped == 1)
- return __get_path(pd);
-
- ret = preload_patcher_write_string((void *)page, handlers_info->path,
- strnlen(handlers_info->path, PATH_MAX),
- task);
- if (ret <= 0) {
- printk(KERN_ERR PRELOAD_PREFIX "Cannot copy string to user!\n");
- goto get_path_failed;
- }
+ char __user *path = __get_path(pd);
- pd->is_mapped = 1;
-
- return __get_path(pd);
-
-get_path_failed:
-
- return NULL;
+ return path;
}
return __get_refcount(pd);
}
-int preload_pd_create_pd(void** target_place, struct task_struct *task)
+struct process_data *preload_pd_get(struct sspt_proc *proc)
+{
+ return (struct process_data *)proc->private_data;
+}
+
+/*
+ * Map one anonymous read/write page into current's address space and
+ * copy the preload handlers-library path into it.
+ * Returns the user-space address of the page, or a negative errno value
+ * cast to unsigned long (-EINVAL when handlers_info is not set, or the
+ * mmap error code).  Must run in the context of current.
+ */
+static unsigned long make_preload_path(void)
+{
+	unsigned long page = -EINVAL;
+
+	if (handlers_info) {
+		const char *path = handlers_info->path;
+		size_t len = strnlen(path, PATH_MAX);
+
+		down_write(&current->mm->mmap_sem);
+		page = swap_do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
+		up_write(&current->mm->mmap_sem);
+
+		if (IS_ERR_VALUE(page)) {
+			printk(KERN_ERR PRELOAD_PREFIX
+			       "Cannot alloc page for %u\n", current->tgid);
+			goto out;
+		}
+
+		/* set preload_library path; a failed copy is only logged,
+		 * the page is still returned to the caller */
+		if (copy_to_user((void __user *)page, path, len) != 0)
+			printk(KERN_ERR PRELOAD_PREFIX
+			       "Cannot copy string to user!\n");
+	}
+
+out:
+	return page;
+}
+
+/*
+ * Return the first vma in @mm for which check_vma() matches @dentry,
+ * or NULL if none does.  Callers take mm->mmap_sem around the walk
+ * (see set_already_mapp()).
+ */
+static struct vm_area_struct *find_vma_by_dentry(struct mm_struct *mm,
+						 struct dentry *dentry)
+{
+	struct vm_area_struct *cur = mm->mmap;
+
+	while (cur != NULL) {
+		if (check_vma(cur, dentry))
+			return cur;
+		cur = cur->vm_next;
+	}
+
+	return NULL;
+}
+
+/*
+ * If the loader and/or handlers libraries are already mapped in @mm,
+ * record their base addresses in @pd; when the handlers library is
+ * found, the process state is advanced to LOADED.  Takes mmap_sem for
+ * reading around the vma walks.
+ */
+static void set_already_mapp(struct process_data *pd, struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+	struct dentry *ld = preload_debugfs_get_loader_dentry();
+	struct dentry *handlers = handlers_info->dentry;
+
+	down_read(&mm->mmap_sem);
+	if (ld) {
+		vma = find_vma_by_dentry(mm, ld);
+		if (vma)
+			__set_loader_base(pd, vma->vm_start);
+	}
+
+	if (handlers) {
+		vma = find_vma_by_dentry(mm, handlers);
+		if (vma) {
+			__set_handlers_base(pd, vma->vm_start);
+			__set_state(pd, LOADED);
+		}
+	}
+	up_read(&mm->mmap_sem);
+}
+
+static struct process_data *do_create_pd(struct task_struct *task)
+{
+ struct process_data *pd;
+ unsigned long page;
int ret;
ret = __pd_create_on_demand();
goto create_pd_exit;
}
- ret = 0;
-
- /* 1. check if loader is already mapped */
- dentry = preload_debugfs_get_loader_dentry();
- base = find_dentry_base(task, dentry);
- if (base)
- __set_loader_base(pd, base);
-
- /* 2. check if handlers are already mapped */
- base = find_dentry_base(task, handlers_info->dentry);
- if (base) {
- __set_handlers_base(pd, base);
- __set_state(pd, LOADED);
- }
-
- /* 3. map page to store path */
-#ifdef CONFIG_ARM
- down_write(¤t->mm->mmap_sem);
-#endif
-
- page = swap_do_mmap(NULL, 0, PAGE_SIZE, PROT_READ,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
-#ifdef CONFIG_ARM
- up_write(¤t->mm->mmap_sem);
-#endif
- if (IS_ERR((void *)page)) {
- printk(KERN_ERR PRELOAD_PREFIX "Cannot alloc page for %u\n", task->tgid);
- ret = -ENOMEM;
- goto create_pd_exit;
+ page = make_preload_path();
+ if (IS_ERR_VALUE(page)) {
+ ret = (long)page;
+ goto free_pd;
}
- pd->is_mapped = 0;
-
__set_data_page(pd, page);
__set_attempts(pd, PRELOAD_MAX_ATTEMPTS);
+ set_already_mapp(pd, task->mm);
+
+ return pd;
- *target_place = pd;
+free_pd:
+ kfree(pd);
create_pd_exit:
- return ret;
+ printk(KERN_ERR PRELOAD_PREFIX "do_pd_create_pd: error=%d\n", ret);
+ return NULL;
+}
+
+/* sspt_proc priv_create callback: build process_data for @proc's task.
+ * Returns NULL on failure (do_create_pd logs the error). */
+static void *pd_create(struct sspt_proc *proc)
+{
+	struct process_data *pd;
+
+	pd = do_create_pd(proc->task);
+
+	return (void *)pd;
}
+/* sspt_proc priv_destroy callback — cleanup not implemented yet. */
+static void pd_destroy(struct sspt_proc *proc, void *data)
+{
+	/* FIXME: to be implemented */
+}
+
+/* Callback set registered with sspt_proc by preload_pd_init(). */
+struct sspt_proc_cb pd_cb = {
+	.priv_create = pd_create,
+	.priv_destroy = pd_destroy
+};
+
int preload_pd_init(void)
{
-	return 0;
+	/* Register per-process private-data callbacks with sspt_proc. */
+	return sspt_proc_cb_set(&pd_cb);
}
void preload_pd_uninit(void)
{
+ sspt_proc_cb_set(NULL);
+
if (handlers_info)
preload_storage_put_handlers_info(handlers_info);
handlers_info = NULL;
#define __PRELOAD_PD_H__
struct process_data;
-struct task_struct;
+struct sspt_proc;
/* process preload states */
enum preload_state_t {
ERROR
};
+struct process_data *preload_pd_get(struct sspt_proc *proc);
+
enum preload_state_t preload_pd_get_state(struct process_data *pd);
void preload_pd_set_state(struct process_data *pd, enum preload_state_t state);
unsigned long preload_pd_get_loader_base(struct process_data *pd);
char __user *preload_pd_get_path(struct process_data *pd);
void preload_pd_put_path(struct process_data *pd);
-int preload_pd_create_pd(void **target_place, struct task_struct *task);
-
int preload_pd_init(void);
void preload_pd_uninit(void);
preload_module_write_msg_init(ip);
}
+/*
+ * Register the write-msg probe, first clearing atomic_ctx on the uprobe.
+ * NOTE(review): presumably this lets the handler run in a sleepable
+ * (non-atomic) context — confirm against the uprobe dispatch code.
+ */
+static int write_msg_reg(struct us_ip *ip)
+{
+	ip->uprobe.atomic_ctx = false;
+
+	return get_caller_register_probe(ip);
+}
+
static void write_msg_uninit(struct us_ip *ip)
{
preload_module_write_msg_exit(ip);
static struct probe_iface write_msg_iface = {
.init = write_msg_init,
.uninit = write_msg_uninit,
- .reg = get_caller_register_probe,
+ .reg = write_msg_reg,
.unreg = get_caller_unregister_probe,
.get_uprobe = get_caller_get_uprobe,
.copy = get_caller_info_copy,
static struct bin_info __handlers_info = { NULL, NULL };
static struct bin_info __linker_info = { NULL, NULL };
static struct bin_info __libc_info;
+static struct bin_info __libpthread_info;
+static struct bin_info __libsmack_info;
static inline struct bin_info *__get_handlers_info(void)
{
__libc_info.dentry = NULL;
}
+static inline void __drop_libpthread_info(void)
+{
+ if (__libpthread_info.dentry)
+ put_dentry(__libpthread_info.dentry);
+
+ __libpthread_info.path = NULL;
+ __libpthread_info.dentry = NULL;
+}
+
+static inline void __drop_libsmack_info(void)
+{
+ if (__libsmack_info.dentry)
+ put_dentry(__libsmack_info.dentry);
+
+ __libsmack_info.path = NULL;
+ __libsmack_info.dentry = NULL;
+}
+
void preload_storage_put_linker_info(struct bin_info *info)
{
}
return &__libc_info;
}
+struct bin_info *preload_storage_get_libpthread_info(void)
+{
+ return &__libpthread_info;
+}
+
+struct bin_info *preload_storage_get_libsmack_info(void)
+{
+ return &__libsmack_info;
+}
+
void preload_storage_put_libc_info(struct bin_info *info)
{
}
+void preload_storage_put_libpthread_info(struct bin_info *info)
+{
+}
+
+void preload_storage_put_libsmack_info(struct bin_info *info)
+{
+}
+
int preload_storage_init(void)
{
__libc_info.path = "/lib/libc.so.6";
if (!__libc_info.dentry)
return -ENOENT;
+ /* TODO: handle the case when the library is absent */
+ __libpthread_info.path = "/lib/libpthread.so.0";
+ __libpthread_info.dentry = get_dentry(__libpthread_info.path);
+
+ if (!__libpthread_info.dentry)
+ return -ENOENT;
+
+ /* TODO: handle the case when the library is absent */
+ __libsmack_info.path = "/usr/lib/libsmack.so.1.0.0";
+ __libsmack_info.dentry = get_dentry(__libsmack_info.path);
+
+ if (!__libsmack_info.dentry)
+ return -ENOENT;
+
return 0;
}
void preload_storage_exit(void)
{
+ __drop_libsmack_info();
+ __drop_libpthread_info();
__drop_libc_info();
__drop_handlers_info();
__drop_linker_info();
struct bin_info *preload_storage_get_libc_info(void);
void preload_storage_put_libc_info(struct bin_info *info);
+struct bin_info *preload_storage_get_libpthread_info(void);
+void preload_storage_put_libpthread_info(struct bin_info *info);
+
+struct bin_info *preload_storage_get_libsmack_info(void);
+void preload_storage_put_libsmack_info(struct bin_info *info);
+
int preload_storage_init(void);
void preload_storage_exit(void);
#include "preload.h"
#include "preload_threads.h"
#include "preload_debugfs.h"
-#include "preload_patcher.h"
#include "preload_pd.h"
struct preload_td {
unsigned long get_preload_flags(struct task_struct *task)
{
- return get_preload_td(task)->flags;
+ struct preload_td *td = get_preload_td(task);
+
+ if (td == NULL)
+ return 0;
+
+ return td->flags;
}
void set_preload_flags(struct task_struct *task,
unsigned long flags)
{
- get_preload_td(task)->flags = flags;
+ struct preload_td *td = get_preload_td(task);
+
+ if (td == NULL) {
+ printk(KERN_ERR "%s: invalid arguments\n", __FUNCTION__);
+ return;
+ }
+
+ td->flags = flags;
}
static inline int __add_to_disable_list(struct thread_slot *slot,
unsigned long disable_addr)
{
- struct disabled_addr *da = kmalloc(sizeof(*da), GFP_KERNEL);
+ struct disabled_addr *da = kmalloc(sizeof(*da), GFP_ATOMIC);
if (da == NULL)
return -ENOMEM;
/* Adds a new slot */
static inline struct thread_slot *__grow_slot(void)
{
- struct thread_slot *tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ struct thread_slot *tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
if (tmp == NULL)
return NULL;
{
struct preload_td *td = get_preload_td(task);
+ if (td == NULL)
+ return NULL;
+
return list_empty(&td->slots) ? NULL :
list_last_entry(&td->slots, struct thread_slot, list);
}
return ret;
}
-int preload_threads_get_drop(struct task_struct *task, bool *drop)
+int preload_threads_get_drop(struct task_struct *task)
{
struct thread_slot *slot;
int ret = 0;
slot = __get_task_slot(task);
if (slot != NULL) {
- *drop = slot->drop;
+ ret = (int) slot->drop;
goto get_drop_done;
}
int preload_threads_get_caller(struct task_struct *task, unsigned long *caller);
int preload_threads_get_call_type(struct task_struct *task,
unsigned char *call_type);
-int preload_threads_get_drop(struct task_struct *task, bool *drop);
+int preload_threads_get_drop(struct task_struct *task);
bool preload_threads_check_disabled_probe(struct task_struct *task,
unsigned long addr);
void preload_threads_enable_probe(struct task_struct *task, unsigned long addr);
memcpy(dest, source, sizeof(*source));
len = strlen(source->rp_i.args) + 1;
- dest->rp_i.args = kmalloc(len, GFP_KERNEL);
+ dest->rp_i.args = kmalloc(len, GFP_ATOMIC);
if (dest->rp_i.args == NULL)
return -ENOMEM;
memcpy(dest->rp_i.args, source->rp_i.args, len);
*/
void sampler_timers_set_quantum(unsigned int timer_quantum)
{
- sampler_timer_quantum = timer_quantum * 1000 * 1000;
+ u64 tmp = (u64)timer_quantum;
+ sampler_timer_quantum = tmp * 1000 * 1000;
}
td->data = NULL;
return;
}
-
- WARN(!ok, TD_PREFIX "td(%p) check failed: %08lx", td, get_magic(td));
}
void *swap_task_data_get(struct task_struct *task, int *ok)
}
EXPORT_SYMBOL_GPL(swap_task_data_set);
+
+static atomic_t start_flag = ATOMIC_INIT(0);
+
static int copy_process_ret_handler(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
struct task_struct *task;
+ if (atomic_read(&start_flag) == 0)
+ return 0;
+
task = (struct task_struct *)regs_return_value(regs);
if (!IS_ERR(task))
swap_task_data_clean(task);
static int do_exit_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct task_data *td = __td(current);
+ struct task_data *td;
+
+ if (atomic_read(&start_flag) == 0)
+ return 0;
+ td = __td(current);
__td_free(td);
return 0;
static int __task_data_init(void *data)
{
struct task_struct *g, *t;
- unsigned long addr;
- int ret;
-
- addr = swap_ksyms_substr("copy_process");
- if (addr == 0) {
- printk(TD_PREFIX "Cannot find address for copy_process\n");
- return -EINVAL;
- }
- copy_process_rp.kp.addr = (kprobe_opcode_t *)addr;
- ret = swap_register_kretprobe(©_process_rp);
- if (ret)
- goto reg_failed;
- addr = swap_ksyms_substr("do_exit");
- if (addr == 0) {
- printk(TD_PREFIX "Cannot find address for do_exit\n");
- return -EINVAL;
- }
- do_exit_probe.addr = (kprobe_opcode_t *)addr;
- ret = swap_register_kprobe(&do_exit_probe);
- if (ret)
- goto unreg_copy_process;
+ /* set start_flag */
+ atomic_set(&start_flag, 1);
do_each_thread(g, t) {
swap_task_data_clean(t);
} while_each_thread(g, t);
return 0;
-
-unreg_copy_process:
- swap_unregister_kretprobe(©_process_rp);
-
-reg_failed:
- printk(TD_PREFIX "0x%lx: probe registration failed\n", addr);
-
- return ret;
}
static int __task_data_exit(void *data)
struct task_struct *g, *t;
struct task_data *td;
- swap_unregister_kprobe(&do_exit_probe);
- swap_unregister_kretprobe(©_process_rp);
-
do_each_thread(g, t) {
td = __td(t);
__td_free(td);
} while_each_thread(g, t);
+ /* reset start_flag */
+ atomic_set(&start_flag, 0);
+
return 0;
}
{
int ret;
+ ret = swap_register_kprobe(&do_exit_probe);
+ if (ret) {
+ pr_err(TD_PREFIX "register on 'do_exit' failed: ret=%d\n", ret);
+ return;
+ }
+
+ ret = swap_register_kretprobe(©_process_rp);
+ if (ret) {
+ swap_unregister_kprobe(&do_exit_probe);
+ pr_err(TD_PREFIX "register on 'copy_process' failed: ret=%d\n", ret);
+ return;
+ }
+
/* stop_machine: cannot get tasklist_lock from module */
ret = stop_machine(__task_data_init, NULL, NULL);
if (ret)
int ret;
/* stop_machine: the same here */
- ret = stop_machine(__task_data_exit, ©_process_rp, NULL);
+ ret = stop_machine(__task_data_exit, NULL, NULL);
if (ret) {
printk(TD_PREFIX "task data cleanup failed: %d\n", ret);
/* something went wrong: at least make sure we unregister
* all the installed probes */
swap_unregister_kprobe(&do_exit_probe);
- swap_unregister_kretprobe(©_process_rp);
}
+
+ swap_unregister_kretprobe(©_process_rp);
+ swap_unregister_kprobe(&do_exit_probe);
+}
+
+static int task_data_once(void)
+{
+ const char *sym;
+
+ sym = "copy_process";
+ copy_process_rp.kp.addr = (kprobe_opcode_t *)swap_ksyms_substr(sym);
+ if (copy_process_rp.kp.addr == NULL)
+ goto not_found;
+
+ sym = "do_exit";
+ do_exit_probe.addr = (kprobe_opcode_t *)swap_ksyms_substr(sym);
+ if (do_exit_probe.addr == NULL)
+ goto not_found;
+
+ return 0;
+
+not_found:
+ pr_err(TD_PREFIX "ERROR: symbol %s(...) not found\n", sym);
+ return -ESRCH;
}
static int task_data_init(void)
int ret = 0;
__task_data_cbs_start_h = us_manager_reg_cb(START_CB, task_data_start);
-
if (__task_data_cbs_start_h < 0) {
ret = __task_data_cbs_start_h;
printk(KERN_ERR TD_PREFIX "start_cb registration failed\n");
goto out;
}
- __task_data_cbs_stop_h = us_manager_reg_cb(STOP_CB, task_data_stop);
-
+ __task_data_cbs_stop_h = us_manager_reg_cb(STOP_CB_TD, task_data_stop);
if (__task_data_cbs_stop_h < 0) {
ret = __task_data_cbs_stop_h;
us_manager_unreg_cb(__task_data_cbs_start_h);
us_manager_unreg_cb(__task_data_cbs_stop_h);
}
-SWAP_LIGHT_INIT_MODULE(NULL, task_data_init, task_data_exit, NULL, NULL);
-
+SWAP_LIGHT_INIT_MODULE(task_data_once, task_data_init, task_data_exit,
+ NULL, NULL);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SWAP Task Data Module");
#include "trampoline_thumb.h"
+#define UBP_ARM (BREAKPOINT_INSTRUCTION)
+#define UBP_THUMB (BREAKPOINT_INSTRUCTION & 0xffff)
+
/**
* @def flush_insns
* @brief Flushes instructions.
(insn & 0xf800) == 0xf800);
}
-static int arch_copy_trampoline_arm_uprobe(struct uprobe *p)
-{
- int ret;
- unsigned long insn = p->opcode;
- unsigned long vaddr = (unsigned long)p->addr;
- unsigned long *tramp = p->atramp.tramp_arm;
-
- ret = arch_make_trampoline_arm(vaddr, insn, tramp);
- p->safe_arm = !!ret;
-
- return ret;
-}
-
static int arch_check_insn_thumb(unsigned long insn)
{
int ret = 0;
return 0;
}
-static int arch_copy_trampoline_thumb_uprobe(struct uprobe *p)
+static int arch_make_trampoline_thumb(unsigned long vaddr, unsigned long insn,
+ unsigned long *tramp, size_t tramp_len)
{
- int uregs, pc_dep;
+ int ret;
+ int uregs = 0;
+ int pc_dep = 0;
unsigned int addr;
- unsigned long vaddr = (unsigned long)p->addr;
- unsigned long insn = p->opcode;
- unsigned long *tramp = p->atramp.tramp_thumb;
- enum { tramp_len = sizeof(p->atramp.tramp_thumb) };
-
- p->safe_thumb = 1;
- if (vaddr & 0x01) {
- printk(KERN_INFO "Error in %s at %d: attempt to register "
- "uprobe at an unaligned address\n", __FILE__, __LINE__);
- return -EINVAL;
- }
-
- if (!arch_check_insn_thumb(insn))
- p->safe_thumb = 0;
- uregs = 0;
- pc_dep = 0;
+ ret = arch_check_insn_thumb(insn);
+ if (ret) {
+ pr_err("THUMB inst isn't supported vaddr=%lx insn=%08lx\n",
+ vaddr, insn);
+ return ret;
+ }
if (THUMB_INSN_MATCH(APC, insn) || THUMB_INSN_MATCH(LRO3, insn)) {
uregs = 0x0700; /* 8-10 */
if (unlikely(uregs && pc_dep)) {
memcpy(tramp, pc_dep_insn_execbuf_thumb, tramp_len);
- if (prep_pc_dep_insn_execbuf_thumb(tramp, insn, uregs) != 0) {
- printk(KERN_INFO "Error in %s at %d: failed to "
- "prepare exec buffer for insn %lx!",
- __FILE__, __LINE__, insn);
- p->safe_thumb = 1;
- }
+ prep_pc_dep_insn_execbuf_thumb(tramp, insn, uregs);
addr = vaddr + 4;
*((unsigned short *)tramp + 13) = 0xdeff;
*/
int arch_prepare_uprobe(struct uprobe *p)
{
+ int ret;
struct task_struct *task = p->task;
- unsigned long vaddr = (unsigned long)p->addr;
+ unsigned long vaddr = (unsigned long)p->addr & ~((unsigned long)1);
unsigned long insn;
-
- if (vaddr & 0x01) {
- printk(KERN_INFO "Error in %s at %d: attempt "
- "to register uprobe at an unaligned address\n",
- __FILE__, __LINE__);
- return -EINVAL;
- }
+ int thumb_mode = (unsigned long)p->addr & 1;
+ unsigned long tramp[UPROBES_TRAMP_LEN];
+ unsigned long __user *utramp;
+ enum { tramp_len = sizeof(tramp) };
if (!read_proc_vm_atomic(task, vaddr, &insn, sizeof(insn))) {
printk(KERN_ERR "failed to read memory %lx!\n", vaddr);
return -EINVAL;
}
- p->opcode = insn;
-
- arch_copy_trampoline_arm_uprobe(p);
- arch_copy_trampoline_thumb_uprobe(p);
-
- if ((p->safe_arm) && (p->safe_thumb)) {
- printk(KERN_INFO "Error in %s at %d: failed "
- "arch_copy_trampoline_*_uprobe() (both) "
- "[tgid=%u, addr=%lx, data=%lx]\n",
- __FILE__, __LINE__, task->tgid, vaddr, insn);
- return -EFAULT;
+ ret = thumb_mode ?
+ arch_make_trampoline_thumb(vaddr, insn,
+ tramp, tramp_len) :
+ arch_make_trampoline_arm(vaddr, insn, tramp);
+ if (ret) {
+ pr_err("failed to make tramp, addr=%p\n", p->addr);
+ return ret;
}
- p->atramp.utramp = swap_slot_alloc(p->sm);
- if (p->atramp.utramp == NULL) {
+ utramp = swap_slot_alloc(p->sm);
+ if (utramp == NULL) {
printk(KERN_INFO "Error: swap_slot_alloc failed (%08lx)\n",
vaddr);
return -ENOMEM;
}
+ if (!write_proc_vm_atomic(p->task, (unsigned long)utramp, tramp,
+ tramp_len)) {
+ pr_err("failed to write memory tramp=%p!\n", utramp);
+ swap_slot_free(p->sm, utramp);
+ return -EINVAL;
+ }
+
+ flush_insns(utramp, tramp_len);
+ p->ainsn.insn = utramp;
+ p->opcode = insn;
+
+ /* for uretprobe */
+ add_uprobe_table(p);
+
return 0;
}
return 0;
}
+unsigned long arch_tramp_by_ri(struct uretprobe_instance *ri)
+{
+ /* Determine function mode (ARM/Thumb) from bit 0 of the saved sp */
+ return ((unsigned long)ri->sp & 1) ?
+ ((unsigned long)ri->rp->up.ainsn.insn + 0x1b) :
+ (unsigned long)(ri->rp->up.ainsn.insn +
+ UPROBES_TRAMP_RET_BREAK_IDX);
+}
+
/**
* @brief Disarms uretprobe instance.
*
* negative error code on error.
*/
int arch_disarm_urp_inst(struct uretprobe_instance *ri,
- struct task_struct *task)
+ struct task_struct *task, unsigned long tr)
{
struct pt_regs *uregs = task_pt_regs(ri->task);
unsigned long ra = swap_get_ret_addr(uregs);
unsigned long *stack = sp - RETPROBE_STACK_DEPTH + 1;
unsigned long *found = NULL;
unsigned long *buf[RETPROBE_STACK_DEPTH];
+ unsigned long vaddr;
int i, retval;
- /* Understand function mode */
- if ((long)ri->sp & 1) {
- tramp = (unsigned long *)
- ((unsigned long)ri->rp->up.ainsn.insn + 0x1b);
+ if (tr == 0) {
+ vaddr = (unsigned long)ri->rp->up.addr;
+ tramp = (unsigned long *)arch_tramp_by_ri(ri);
} else {
- tramp = (unsigned long *)(ri->rp->up.ainsn.insn +
- UPROBES_TRAMP_RET_BREAK_IDX);
+ /* ri is not valid here; use the explicitly passed trampoline */
+ vaddr = 0;
+ tramp = (unsigned long *)tr;
}
/* check stack */
}
printk(KERN_INFO "---> %s (%d/%d): trampoline found at "
- "%08lx (%08lx /%+d) - %p\n",
+ "%08lx (%08lx /%+d) - %lx, set ret_addr=%p\n",
task->comm, task->tgid, task->pid,
(unsigned long)found, (unsigned long)sp,
- found - sp, ri->rp->up.addr);
+ found - sp, vaddr, ri->ret_addr);
retval = write_proc_vm_atomic(task, (unsigned long)found,
&ri->ret_addr,
sizeof(ri->ret_addr));
check_lr: /* check lr anyway */
if (ra == (unsigned long)tramp) {
printk(KERN_INFO "---> %s (%d/%d): trampoline found at "
- "lr = %08lx - %p\n",
- task->comm, task->tgid, task->pid,
- ra, ri->rp->up.addr);
+ "lr = %08lx - %lx, set ret_addr=%p\n",
+ task->comm, task->tgid, task->pid, ra, vaddr, ri->ret_addr);
+
swap_set_ret_addr(uregs, (unsigned long)ri->ret_addr);
retval = 0;
} else if (retval) {
printk(KERN_INFO "---> %s (%d/%d): trampoline NOT found at "
- "sp = %08lx, lr = %08lx - %p\n",
+ "sp = %08lx, lr = %08lx - %lx, ret_addr=%p\n",
task->comm, task->tgid, task->pid,
- (unsigned long)sp, ra, ri->rp->up.addr);
+ (unsigned long)sp, ra, vaddr, ri->ret_addr);
}
return retval;
*/
void arch_remove_uprobe(struct uprobe *up)
{
- swap_slot_free(up->sm, up->atramp.utramp);
+ swap_slot_free(up->sm, up->ainsn.insn);
}
-static void restore_opcode_for_thumb(struct uprobe *p, struct pt_regs *regs)
+int arch_arm_uprobe(struct uprobe *p)
{
- if (thumb_mode(regs) && !is_thumb2(p->opcode)) {
- u16 tmp = p->opcode >> 16;
- write_proc_vm_atomic(current,
- (unsigned long)((u16 *)p->addr + 1), &tmp, 2);
- flush_insns(p->addr, 4);
+ int ret;
+ unsigned long vaddr = (unsigned long)p->addr & ~((unsigned long)1);
+ int thumb_mode = (unsigned long)p->addr & 1;
+ int len = 4 >> thumb_mode; /* if thumb_mode then len = 2 */
+ unsigned long insn = thumb_mode ? UBP_THUMB : UBP_ARM;
+
+ ret = write_proc_vm_atomic(p->task, vaddr, &insn, len);
+ if (!ret) {
+ pr_err("arch_arm_uprobe: failed to write memory tgid=%u addr=%08lx len=%d\n",
+ p->task->tgid, vaddr, len);
+
+ return -EACCES;
+ } else {
+ flush_insns(vaddr, len);
}
+
+ return 0;
}
-static int make_trampoline(struct uprobe *p, struct pt_regs *regs)
+void arch_disarm_uprobe(struct uprobe *p, struct task_struct *task)
{
- unsigned long *tramp, *utramp;
- int sw;
+ int ret;
- /*
- * 0 bit - thumb mode (0 - arm, 1 - thumb)
- * 1 bit - arm mode support (0 - off, 1 on)
- * 2 bit - thumb mode support (0 - off, 1 on)`
- */
- sw = (!!thumb_mode(regs)) |
- (int)!p->safe_arm << 1 |
- (int)!p->safe_thumb << 2;
-
- switch (sw) {
- /* ARM */
- case 0b110:
- case 0b010:
- tramp = p->atramp.tramp_arm;
- break;
- /* THUMB */
- case 0b111:
- case 0b101:
- restore_opcode_for_thumb(p, regs);
- tramp = p->atramp.tramp_thumb;
- break;
- default:
- printk(KERN_INFO "Error in %s at %d: we are in arm mode "
- "(!) and check instruction was fail "
- "(%0lX instruction at %p address)!\n",
- __FILE__, __LINE__, p->opcode, p->addr);
-
- disarm_uprobe(p, p->task);
+ unsigned long vaddr = (unsigned long)p->addr & ~((unsigned long)1);
+ int thumb_mode = (unsigned long)p->addr & 1;
+ int len = 4 >> thumb_mode; /* if thumb_mode then len = 2 */
- return 1;
+ ret = write_proc_vm_atomic(task, vaddr, &p->opcode, len);
+ if (!ret) {
+ pr_err("arch_disarm_uprobe: failed to write memory tgid=%u addr=%08lx len=%d\n",
+ task->tgid, vaddr, len);
+ } else {
+ flush_insns(vaddr, len);
}
+}
- utramp = p->atramp.utramp;
+static int urp_handler(struct pt_regs *regs, pid_t tgid)
+{
+ struct uprobe *p;
+ unsigned long vaddr = regs->ARM_pc;
+ unsigned long offset_bp = thumb_mode(regs) ?
+ 0x1a :
+ 4 * UPROBES_TRAMP_RET_BREAK_IDX;
+ unsigned long tramp_addr = vaddr - offset_bp;
- if (!write_proc_vm_atomic(p->task, (unsigned long)utramp, tramp,
- UPROBES_TRAMP_LEN * sizeof(*tramp))) {
- printk(KERN_ERR "failed to write memory %p!\n", utramp);
- return -EINVAL;
+ p = get_uprobe_by_insn_slot((void *)tramp_addr, tgid, regs);
+ if (p == NULL) {
+ printk(KERN_INFO
+ "no_uprobe: Not one of ours: let kernel handle it %lx\n",
+ vaddr);
+ return 1;
}
- flush_insns(utramp, UPROBES_TRAMP_LEN * sizeof(*tramp));
- p->ainsn.insn = utramp;
+ trampoline_uprobe_handler(p, regs);
return 0;
}
-
/**
* @brief Prepares singlestep for current CPU.
*
- * @param p Pointer to uprobe.
+ * @param p Pointer to uprobe.
* @param regs Pointer to CPU registers data.
* @return Void.
*/
-
-static void uprobe_prepare_singlestep(struct uprobe *p, struct pt_regs *regs)
+static void arch_prepare_singlestep(struct uprobe *p, struct pt_regs *regs)
{
int cpu = smp_processor_id();
}
}
-static int uprobe_handler(struct pt_regs *regs)
-{
- uprobe_opcode_t *addr = (uprobe_opcode_t *)(regs->ARM_pc);
- struct task_struct *task = current;
- pid_t tgid = task->tgid;
- struct uprobe *p;
-
- p = get_uprobe(addr, tgid);
- if (p == NULL) {
- unsigned long offset_bp = thumb_mode(regs) ?
- 0x1a :
- 4 * UPROBES_TRAMP_RET_BREAK_IDX;
- void *tramp_addr = (void *)addr - offset_bp;
-
- p = get_uprobe_by_insn_slot(tramp_addr, tgid, regs);
- if (p == NULL) {
- printk(KERN_INFO "no_uprobe: Not one of ours: let "
- "kernel handle it %p\n", addr);
- return 1;
- }
-
- trampoline_uprobe_handler(p, regs);
- } else {
- if (p->ainsn.insn == NULL) {
-
- if (make_trampoline(p, regs)) {
- printk(KERN_INFO "no_uprobe live\n");
- return 0;
- }
-
- /* for uretprobe */
- add_uprobe_table(p);
- }
-
- if (!p->pre_handler || !p->pre_handler(p, regs))
- uprobe_prepare_singlestep(p, regs);
- }
-
- return 0;
-}
-
/**
* @brief Breakpoint instruction handler.
*
*/
int uprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
- int ret;
+ int ret = 0;
+ struct uprobe *p;
unsigned long flags;
- local_irq_save(flags);
+ unsigned long vaddr = regs->ARM_pc | !!thumb_mode(regs);
+ pid_t tgid = current->tgid;
+ local_irq_save(flags);
preempt_disable();
- ret = uprobe_handler(regs);
- swap_preempt_enable_no_resched();
+ p = get_uprobe((uprobe_opcode_t *)vaddr, tgid);
+ if (p) {
+ bool prepare = false;
+
+ if (p->atomic_ctx) {
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ prepare = true;
+ } else {
+ swap_preempt_enable_no_resched();
+ local_irq_restore(flags);
+
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ prepare = true;
+
+ local_irq_save(flags);
+ preempt_disable();
+ }
+
+ if (prepare)
+ arch_prepare_singlestep(p, regs);
+ } else {
+ ret = urp_handler(regs, tgid);
+
+ /* check whether the ARM/THUMB mode is correct */
+ if (ret) {
+ vaddr ^= 1;
+ p = get_uprobe((uprobe_opcode_t *)vaddr, tgid);
+ if (p) {
+ pr_err("invalid mode: thumb=%d addr=%p insn=%08lx\n",
+ !!thumb_mode(regs), p->addr, p->opcode);
+ ret = 0;
+
+ swap_preempt_enable_no_resched();
+ local_irq_restore(flags);
+
+ disarm_uprobe(p, current);
+
+ local_irq_save(flags);
+ preempt_disable();
+ }
+ }
+ }
+
+ swap_preempt_enable_no_resched();
local_irq_restore(flags);
+
return ret;
}
/* userspace probes hook (arm) */
static struct undef_hook undef_hook_for_us_arm = {
.instr_mask = 0xffffffff,
- .instr_val = BREAKPOINT_INSTRUCTION,
+ .instr_val = UBP_ARM,
.cpsr_mask = MODE_MASK,
.cpsr_val = USR_MODE,
.fn = uprobe_trap_handler
/* userspace probes hook (thumb) */
static struct undef_hook undef_hook_for_us_thumb = {
.instr_mask = 0xffffffff,
- .instr_val = BREAKPOINT_INSTRUCTION & 0x0000ffff,
+ .instr_val = UBP_THUMB,
.cpsr_mask = MODE_MASK,
.cpsr_val = USR_MODE,
.fn = uprobe_trap_handler
struct arch_tramp {
unsigned long tramp_arm[UPROBES_TRAMP_LEN]; /**< ARM trampoline */
unsigned long tramp_thumb[UPROBES_TRAMP_LEN]; /**< Thumb trampoline */
- void *utramp; /**< Pointer to trampoline */
+ void *utramp; /**< Pointer to trampoline */
};
void arch_opcode_analysis_uretprobe(struct uretprobe *rp);
int arch_prepare_uretprobe(struct uretprobe_instance *ri, struct pt_regs *regs);
int arch_disarm_urp_inst(struct uretprobe_instance *ri,
- struct task_struct *task);
+ struct task_struct *task, unsigned long tr);
+unsigned long arch_tramp_by_ri(struct uretprobe_instance *ri);
unsigned long arch_get_trampoline_addr(struct uprobe *p, struct pt_regs *regs);
void arch_set_orig_ret_addr(unsigned long orig_ret_addr, struct pt_regs *regs);
void arch_remove_uprobe(struct uprobe *up);
+int arch_arm_uprobe(struct uprobe *p);
+void arch_disarm_uprobe(struct uprobe *p, struct task_struct *task);
static inline unsigned long swap_get_uarg(struct pt_regs *regs, unsigned long n)
{
switch (n) {
case 0:
regs->ARM_r0 = val;
+ break;
case 1:
regs->ARM_r1 = val;
+ break;
case 2:
regs->ARM_r2 = val;
+ break;
case 3:
regs->ARM_r3 = val;
+ break;
+ default:
+ ptr = (u32 *)regs->ARM_sp + n - 4;
+ if (put_user(val, ptr))
+ pr_err("failed to dereference a pointer[%p]\n", ptr);
}
-
- ptr = (u32 *)regs->ARM_sp + n - 4;
- if (put_user(val, ptr))
- printk(KERN_INFO "failed to dereference a pointer, ptr=%p\n",
- ptr);
}
int swap_arch_init_uprobes(void);
UPROBES_TRAMP_RET_BREAK_IDX);
}
+unsigned long arch_tramp_by_ri(struct uretprobe_instance *ri)
+{
+ return trampoline_addr(&ri->rp->up);
+}
+
static struct uprobe_ctlblk *current_ucb(void)
{
/* FIXME hardcoded offset */
{
/* Replace the return addr with trampoline addr */
unsigned long ra = trampoline_addr(&ri->rp->up);
- ri->sp = (uprobe_opcode_t *)regs->sp;
+ unsigned long ret_addr;
+ ri->sp = (kprobe_opcode_t *)regs->sp;
- if (!read_proc_vm_atomic(current, regs->EREG(sp), &(ri->ret_addr),
- sizeof(ri->ret_addr))) {
- printk(KERN_ERR "failed to read user space func ra %lx addr=%p!\n",
- regs->EREG(sp), ri->rp->up.addr);
+ if (get_user(ret_addr, (unsigned long *)regs->sp)) {
+ pr_err("failed to read user space func ra %lx addr=%p!\n",
+ regs->sp, ri->rp->up.addr);
return -EINVAL;
}
- if (!write_proc_vm_atomic(current, regs->EREG(sp), &ra, sizeof(ra))) {
- printk(KERN_ERR "failed to write user space func ra %lx!\n",
- regs->EREG(sp));
+ if (put_user(ra, (unsigned long *)regs->sp)) {
+ pr_err("failed to write user space func ra %lx!\n", regs->sp);
return -EINVAL;
}
+ ri->ret_addr = (uprobe_opcode_t *)ret_addr;
+
return 0;
}
+static bool get_long(struct task_struct *task,
+ unsigned long vaddr, unsigned long *val)
+{
+ return sizeof(*val) != read_proc_vm_atomic(task, vaddr,
+ val, sizeof(*val));
+}
+
+static bool put_long(struct task_struct *task,
+ unsigned long vaddr, unsigned long *val)
+{
+ return sizeof(*val) != write_proc_vm_atomic(task, vaddr,
+ val, sizeof(*val));
+}
+
/**
* @brief Disarms uretprobe on x86 arch.
*
* negative error code on error.
*/
int arch_disarm_urp_inst(struct uretprobe_instance *ri,
- struct task_struct *task)
+ struct task_struct *task, unsigned long tr)
{
- int len;
unsigned long ret_addr;
unsigned long sp = (unsigned long)ri->sp;
- unsigned long tramp_addr = trampoline_addr(&ri->rp->up);
- len = read_proc_vm_atomic(task, sp, &ret_addr, sizeof(ret_addr));
- if (len != sizeof(ret_addr)) {
+ unsigned long tramp_addr;
+
+ if (tr == 0)
+ tramp_addr = arch_tramp_by_ri(ri);
+ else
+ tramp_addr = tr; /* ri is not valid; use the passed trampoline */
+
+ if (get_long(task, sp, &ret_addr)) {
printk(KERN_INFO "---> %s (%d/%d): failed to read stack from %08lx\n",
task->comm, task->tgid, task->pid, sp);
return -EFAULT;
}
if (tramp_addr == ret_addr) {
- len = write_proc_vm_atomic(task, sp, &ri->ret_addr,
- sizeof(ri->ret_addr));
- if (len != sizeof(ri->ret_addr)) {
+ if (put_long(task, sp, (unsigned long *)&ri->ret_addr)) {
printk(KERN_INFO "---> %s (%d/%d): failed to write "
"orig_ret_addr to %08lx",
task->comm, task->tgid, task->pid, sp);
swap_slot_free(p->sm, p->ainsn.insn);
}
+int arch_arm_uprobe(struct uprobe *p)
+{
+ int ret;
+ uprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
+ unsigned long vaddr = (unsigned long)p->addr;
+
+ ret = write_proc_vm_atomic(p->task, vaddr, &insn, sizeof(insn));
+ if (!ret) {
+ pr_err("arch_arm_uprobe: failed to write memory tgid=%u vaddr=%08lx\n",
+ p->task->tgid, vaddr);
+
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void arch_disarm_uprobe(struct uprobe *p, struct task_struct *task)
+{
+ int ret;
+ unsigned long vaddr = (unsigned long)p->addr;
+
+ ret = write_proc_vm_atomic(task, vaddr, &p->opcode, sizeof(p->opcode));
+ if (!ret) {
+ pr_err("arch_disarm_uprobe: failed to write memory tgid=%u, vaddr=%08lx\n",
+ task->tgid, vaddr);
+ }
+}
+
static void set_user_jmp_op(void *from, void *to)
{
struct __arch_jmp_op {
jop.raddr = (long)(to) - ((long)(from) + 5);
jop.op = RELATIVEJUMP_INSTRUCTION;
- if (!write_proc_vm_atomic(current, (unsigned long)from, &jop,
- sizeof(jop)))
- printk(KERN_WARNING
- "failed to write jump opcode to user space %p\n", from);
+ if (put_user(jop.op, (char *)from) ||
+ put_user(jop.raddr, (long *)(from + 1)))
+ pr_err("failed to write jump opcode to user space %p\n", from);
}
static void resume_execution(struct uprobe *p,
regs->EREG(flags) &= ~TF_MASK;
tos = (unsigned long *)&tos_dword;
- if (!read_proc_vm_atomic(current, regs->EREG(sp), &tos_dword,
- sizeof(tos_dword))) {
- printk(KERN_WARNING
- "failed to read dword from top of the user space stack %lx!\n",
- regs->sp);
+ if (get_user(tos_dword, (unsigned long *)regs->sp)) {
+ pr_err("failed to read from user space sp=%lx!\n", regs->sp);
return;
}
- if (!read_proc_vm_atomic(current, (unsigned long)p->ainsn.insn, insns,
- 2 * sizeof(uprobe_opcode_t))) {
- printk(KERN_WARNING
- "failed to read first 2 opcodes of instruction copy from user space %p!\n",
- p->ainsn.insn);
+ if (get_user(*(unsigned short *)insns, (unsigned short *)p->ainsn.insn)) {
+ pr_err("failed to read first 2 opcodes %p!\n", p->ainsn.insn);
return;
}
case 0x9a: /* call absolute -- same as call absolute, indirect */
*tos = orig_eip + (*tos - copy_eip);
- if (!write_proc_vm_atomic(current,
- regs->EREG(sp),
- &tos_dword,
- sizeof(tos_dword))) {
- printk(KERN_WARNING
- "failed to write dword to top of the user space stack %lx!\n",
- regs->sp);
+ if (put_user(tos_dword, (unsigned long *)regs->sp)) {
+ pr_err("failed to write dword to sp=%lx\n", regs->sp);
return;
}
*/
*tos = orig_eip + (*tos - copy_eip);
- if (!write_proc_vm_atomic(current, regs->EREG(sp),
- &tos_dword,
- sizeof(tos_dword))) {
- printk(KERN_WARNING
- "failed to write dword to top of the user space stack %lx!\n",
- regs->EREG(sp));
+ if (put_user(tos_dword, (unsigned long *)regs->sp)) {
+ pr_err("failed to write dword to sp=%lx\n", regs->sp);
return;
}
break;
}
- if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword,
- sizeof(tos_dword))) {
- printk(KERN_WARNING
- "failed to write dword to top of the user space stack %lx!\n",
- regs->EREG(sp));
+ if (put_user(tos_dword, (unsigned long *)regs->sp)) {
+ pr_err("failed to write dword to sp=%lx\n", regs->sp);
return;
}
int arch_prepare_uretprobe(struct uretprobe_instance *ri, struct pt_regs *regs);
int arch_disarm_urp_inst(struct uretprobe_instance *ri,
- struct task_struct *task);
+ struct task_struct *task, unsigned long tr);
+unsigned long arch_tramp_by_ri(struct uretprobe_instance *ri);
unsigned long arch_get_trampoline_addr(struct uprobe *p, struct pt_regs *regs);
void arch_set_orig_ret_addr(unsigned long orig_ret_addr, struct pt_regs *regs);
void arch_remove_uprobe(struct uprobe *up);
+int arch_arm_uprobe(struct uprobe *p);
+void arch_disarm_uprobe(struct uprobe *p, struct task_struct *task);
static inline unsigned long swap_get_uarg(struct pt_regs *regs, unsigned long n)
{
}
#endif
+
+struct uinst_info *uinst_info_create(unsigned long vaddr,
+ kprobe_opcode_t opcode)
+{
+ struct uinst_info *uinst;
+
+ uinst = kmalloc(sizeof(*uinst), GFP_ATOMIC);
+ if (uinst) {
+ INIT_HLIST_NODE(&uinst->hlist);
+ uinst->vaddr = vaddr;
+ uinst->opcode = opcode;
+ } else {
+ pr_err("Cannot allocate memory for uinst\n");
+ }
+
+ return uinst;
+}
+EXPORT_SYMBOL_GPL(uinst_info_create);
+
+void uinst_info_destroy(struct uinst_info *uinst)
+{
+ kfree(uinst);
+}
+EXPORT_SYMBOL_GPL(uinst_info_destroy);
+
+void uinst_info_disarm(struct uinst_info *uinst, struct task_struct *task)
+{
+ int ret = write_proc_vm_atomic(task, uinst->vaddr,
+ &uinst->opcode, sizeof(uinst->opcode));
+ if (!ret) {
+ printk("uinst_info_disarm: failed to write memory "
+ "tgid=%u, vaddr=%08lx!\n", task->tgid, uinst->vaddr);
+ }
+}
+EXPORT_SYMBOL_GPL(uinst_info_disarm);
+
/*
* Keep all fields in the uprobe consistent
*/
{
memcpy(&p->opcode, &old_p->opcode, sizeof(uprobe_opcode_t));
memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_insn));
-#ifdef CONFIG_ARM
- p->safe_arm = old_p->safe_arm;
- p->safe_thumb = old_p->safe_thumb;
-#endif
}
/*
static int arm_uprobe(struct uprobe *p)
{
- uprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
- int ret = write_proc_vm_atomic(p->task, (unsigned long)p->addr,
- &insn, sizeof(insn));
- if (!ret) {
- printk("arm_uprobe: failed to write memory "
- "tgid=%u addr=%p!\n", p->task->tgid, p->addr);
-
- return -EACCES;
- }
-
- return 0;
+ return arch_arm_uprobe(p);
}
/**
*/
void disarm_uprobe(struct uprobe *p, struct task_struct *task)
{
- int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
- &p->opcode, sizeof(p->opcode));
- if (!ret) {
- printk("disarm_uprobe: failed to write memory "
- "tgid=%u, addr=%p!\n", task->tgid, p->addr);
- }
+ arch_disarm_uprobe(p, task);
}
EXPORT_SYMBOL_GPL(disarm_uprobe);
if (!p->addr)
return -EINVAL;
-/* thumb address = address-1; */
-#if defined(CONFIG_ARM)
- /* TODO: must be corrected in 'bundle' */
- if ((unsigned long) p->addr & 0x01)
- p->addr = (uprobe_opcode_t *)((unsigned long)p->addr &
- 0xfffffffe);
-#endif
-
p->ainsn.insn = NULL;
INIT_LIST_HEAD(&p->list);
#ifdef KPROBES_PROFILE
task->tgid, task->pid, task->comm, p->addr);
ret = -EINVAL;
goto out;
-#ifdef CONFIG_ARM
- p->safe_arm = old_p->safe_arm;
- p->safe_thumb = old_p->safe_thumb;
-#endif
+
ret = register_aggr_uprobe(old_p, p);
DBPRINTF("goto out\n", ret);
goto out;
INIT_HLIST_HEAD(&rp->free_instances);
for (i = 0; i < rp->maxactive; i++) {
- inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
+ inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_KERNEL);
if (inst == NULL) {
free_urp_inst(rp);
return -ENOMEM;
EXPORT_SYMBOL_GPL(swap_register_uretprobe);
/**
- * @brief Disarms uretprobe instances for the specified child task.
- *
- * @param parent Pointer to the parent task struct.
- * @param task Pointer to the child task struct.
- * @return 0
- */
-int swap_disarm_urp_inst_for_task(struct task_struct *parent,
- struct task_struct *task)
-{
- unsigned long flags;
- struct uretprobe_instance *ri;
- struct hlist_head *head;
- struct hlist_node *tmp;
- DECLARE_NODE_PTR_FOR_HLIST(node);
-
- spin_lock_irqsave(&uretprobe_lock, flags);
-
- head = uretprobe_inst_table_head(parent->mm);
- swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (parent == ri->task)
- arch_disarm_urp_inst(ri, task);
- }
-
- spin_unlock_irqrestore(&uretprobe_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(swap_disarm_urp_inst_for_task);
-
-/**
- * @brief Disarms uretprobes for specified task.
- *
- * @param task Pointer to the task_struct.
- * @return Void.
- */
-void swap_discard_pending_uretprobes(struct task_struct *task)
-{
- unsigned long flags;
- struct uretprobe_instance *ri;
- struct hlist_head *head;
- struct hlist_node *tmp;
- DECLARE_NODE_PTR_FOR_HLIST(node);
-
- spin_lock_irqsave(&uretprobe_lock, flags);
-
- head = uretprobe_inst_table_head(task->mm);
- swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (ri->task == task) {
- printk(KERN_INFO "%s (%d/%d): pending urp inst: %08lx\n",
- task->comm, task->tgid, task->pid,
- (unsigned long)ri->rp->up.addr);
- arch_disarm_urp_inst(ri, task);
- recycle_urp_inst(ri);
- }
- }
-
- spin_unlock_irqrestore(&uretprobe_lock, flags);
-}
-EXPORT_SYMBOL_GPL(swap_discard_pending_uretprobes);
-
-/**
* @brief Unregisters uretprobe.
*
* @param rp Pointer to the ureprobe.
struct uretprobe_instance *ri;
__swap_unregister_uprobe(&rp->up, disarm);
- spin_lock_irqsave(&uretprobe_lock, flags);
+ spin_lock_irqsave(&uretprobe_lock, flags);
while ((ri = get_used_urp_inst(rp)) != NULL) {
- if (arch_disarm_urp_inst(ri, ri->task) != 0)
+ bool is_current = ri->task == current;
+
+ if (is_current)
+ spin_unlock_irqrestore(&uretprobe_lock, flags);
+
+ /* FIXME: arch_disarm_urp_inst() for no current context */
+ if (arch_disarm_urp_inst(ri, ri->task, 0) != 0)
printk(KERN_INFO "%s (%d/%d): "
"cannot disarm urp instance (%08lx)\n",
ri->task->comm, ri->task->tgid, ri->task->pid,
(unsigned long)rp->up.addr);
+
+ if (is_current)
+ spin_lock_irqsave(&uretprobe_lock, flags);
+
recycle_urp_inst(ri);
}
ri->rp = NULL;
hlist_del(&ri->uflist);
}
-
spin_unlock_irqrestore(&uretprobe_lock, flags);
+
free_urp_inst(rp);
}
EXPORT_SYMBOL_GPL(__swap_unregister_uretprobe);
}
EXPORT_SYMBOL_GPL(swap_ujprobe_return);
+
+/*
+ * Snapshot a live uretprobe instance into a standalone urinst_info
+ * record (task, stack pointer, trampoline, original return address).
+ * GFP_ATOMIC because callers hold uretprobe_lock with IRQs disabled.
+ * Returns NULL on allocation failure; callers must tolerate that.
+ */
+static struct urinst_info *urinst_info_create(struct uretprobe_instance *ri)
+{
+ struct urinst_info *urinst;
+
+ urinst = kmalloc(sizeof(*urinst), GFP_ATOMIC);
+ if (urinst) {
+ INIT_HLIST_NODE(&urinst->hlist);
+ urinst->task = ri->task;
+ urinst->sp = (unsigned long)ri->sp;
+ urinst->tramp = arch_tramp_by_ri(ri);
+ urinst->ret_addr = (unsigned long)ri->ret_addr;
+ } else {
+ pr_err("Cannot allocate memory for urinst\n");
+ }
+
+ return urinst;
+}
+
+/* Free a urinst_info created by urinst_info_create(); NULL is a no-op. */
+static void urinst_info_destroy(struct urinst_info *urinst)
+{
+ kfree(urinst);
+}
+
+/*
+ * Rebuild a temporary uretprobe_instance from the saved snapshot and
+ * restore the original return address in @task via arch_disarm_urp_inst(),
+ * passing the recorded trampoline address explicitly since the real
+ * instance may already have been recycled.
+ */
+static void urinst_info_disarm(struct urinst_info *urinst, struct task_struct *task)
+{
+ struct uretprobe_instance ri;
+ unsigned long tramp = urinst->tramp;
+
+ /* set necessary data*/
+ ri.task = urinst->task;
+ ri.sp = (kprobe_opcode_t *)urinst->sp;
+ ri.ret_addr = (kprobe_opcode_t *)urinst->ret_addr;
+
+ arch_disarm_urp_inst(&ri, task, tramp);
+}
+
+/*
+ * Collect snapshots of all pending uretprobe instances belonging to
+ * current into @head, preserving hash-list order (hlist_add_after on
+ * the previously added node).  If @recycle is true the originals are
+ * returned to their free lists.  Runs under uretprobe_lock; snapshots
+ * that fail to allocate are silently dropped (urinst_info_create logs).
+ */
+void urinst_info_get_current_hlist(struct hlist_head *head, bool recycle)
+{
+ unsigned long flags;
+ struct task_struct *task = current;
+ struct uretprobe_instance *ri;
+ struct hlist_head *hhead;
+ struct hlist_node *n;
+ struct hlist_node *last = NULL;
+ DECLARE_NODE_PTR_FOR_HLIST(node);
+
+ spin_lock_irqsave(&uretprobe_lock, flags);
+ hhead = uretprobe_inst_table_head(task->mm);
+ swap_hlist_for_each_entry_safe(ri, node, n, hhead, hlist) {
+ if (task == ri->task) {
+ struct urinst_info *urinst;
+
+ urinst = urinst_info_create(ri);
+ if (urinst) {
+ if (last)
+ hlist_add_after(last, &urinst->hlist);
+ else
+ hlist_add_head(&urinst->hlist, head);
+
+ last = &urinst->hlist;
+ }
+
+ if (recycle)
+ recycle_urp_inst(ri);
+ }
+ }
+ spin_unlock_irqrestore(&uretprobe_lock, flags);
+}
+EXPORT_SYMBOL_GPL(urinst_info_get_current_hlist);
+EXPORT_SYMBOL_GPL(urinst_info_get_current_hlist);
+
+/*
+ * Drain @head (built by urinst_info_get_current_hlist): if @task is
+ * non-NULL, disarm each snapshot in that task first, then unlink and
+ * free every record.  Pass task == NULL to discard without disarming.
+ */
+void urinst_info_put_current_hlist(struct hlist_head *head,
+ struct task_struct *task)
+{
+ struct urinst_info *urinst;
+ struct hlist_node *tmp;
+ DECLARE_NODE_PTR_FOR_HLIST(node);
+
+ swap_hlist_for_each_entry_safe(urinst, node, tmp, head, hlist) {
+ /* check on disarm */
+ if (task)
+ urinst_info_disarm(urinst, task);
+
+ hlist_del(&urinst->hlist);
+ urinst_info_destroy(urinst);
+ }
+}
+EXPORT_SYMBOL_GPL(urinst_info_put_current_hlist);
+
+
static int once(void)
{
init_uprobe_table();
struct arch_tramp atramp; /**< Stores trampoline */
struct task_struct *task; /**< Pointer to the task struct */
struct slot_manager *sm; /**< Pointer to slot manager */
+ bool atomic_ctx; /**< Handler context */
};
+struct uinst_info {
+ struct hlist_node hlist;
+
+ unsigned long vaddr;
+ kprobe_opcode_t opcode;
+};
+
+struct urinst_info {
+ struct hlist_node hlist;
+
+ struct task_struct *task;
+ unsigned long sp;
+ unsigned long tramp;
+ unsigned long ret_addr;
+};
+
+struct uinst_info *uinst_info_create(unsigned long vaddr,
+ kprobe_opcode_t opcode);
+void uinst_info_destroy(struct uinst_info *uinst);
+void uinst_info_disarm(struct uinst_info *uinst, struct task_struct *task);
+
+
+void urinst_info_get_current_hlist(struct hlist_head *head, bool recycle);
+void urinst_info_put_current_hlist(struct hlist_head *head,
+ struct task_struct *task);
+
+
/**
* @brief Uprobe pre-entry handler.
*/
void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm);
void swap_unregister_all_uprobes(struct task_struct *task);
-void swap_discard_pending_uretprobes(struct task_struct *task);
void swap_ujprobe_return(void);
struct uprobe *get_uprobe(void *addr, pid_t tgid);
enum callback_t {
START_CB = 0,
- STOP_CB
+ STOP_CB,
+ STOP_CB_TD
};
/* Gets callback type (on start or on stop) and function pointer.
if (!sspt_proc_is_send_event(proc))
return;
- sprintf(pid_str, "%d", proc->tgid);
+ snprintf(pid_str, sizeof(pid_str), "%d", proc->tgid);
len = strlen(pid_str);
struct pf_data {
unsigned long addr;
+
+#if defined(CONFIG_ARM)
+ struct pt_regs *pf_regs;
+ unsigned long save_pc;
+#endif /* CONFIG_ARM */
};
static int entry_handler_pf(struct kretprobe_instance *ri, struct pt_regs *regs)
#if defined(CONFIG_ARM)
data->addr = swap_get_karg(regs, 0);
+ data->pf_regs = (struct pt_regs *)swap_get_karg(regs, 2);
+ data->save_pc = data->pf_regs->ARM_pc;
#elif defined(CONFIG_X86_32)
data->addr = read_cr2();
#else
#error "this architecture is not supported"
#endif /* CONFIG_arch */
+ if (data->addr) {
+ struct sspt_proc * proc = sspt_proc_get_by_task(current);
+
+ if (proc && (proc->r_state_addr == data->addr))
+ /* skip ret_handler_pf() for current task */
+ return 1;
+ }
+
return 0;
}
static int ret_handler_pf(struct kretprobe_instance *ri, struct pt_regs *regs)
{
struct task_struct *task = current;
+ struct pf_data *data = (struct pf_data *)ri->data;
unsigned long page_addr;
int ret;
if (is_kthread(task))
return 0;
+#if defined(CONFIG_ARM)
+ /* skip fixup page_fault */
+ if (data->save_pc != data->pf_regs->ARM_pc)
+ return 0;
+#endif /* CONFIG_ARM */
+
/* TODO: check return value */
- page_addr = ((struct pf_data *)ri->data)->addr & PAGE_MASK;
+ page_addr = data->addr & PAGE_MASK;
ret = set_jump_cb((unsigned long)ri->ret_addr, regs, cb_pf,
&page_addr, sizeof(page_addr));
static int ctx_task_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
+ int ret;
struct sspt_proc *proc;
- unsigned long page_addr;
struct task_struct *task = current;
if (is_kthread(task) || check_task_on_filters(task) == 0)
if (proc && proc->first_install)
return 0;
- page_addr = 0;
+ ret = set_kjump_cb(regs, cb_check_and_install, NULL, 0);
+ if (ret < 0)
+ pr_err("ctx_task_pre_handler: ret=%d\n", ret);
- return set_kjump_cb(regs, cb_check_and_install, NULL, 0);
+ return 0;
}
static struct kprobe ctx_task_kprobe = {
* copy_process() *
******************************************************************************
*/
-static atomic_t copy_process_cnt = ATOMIC_INIT(0);
+/*
+ * sspt_proc_on_each_ip() callback: for each installed IP that has a
+ * uprobe, snapshot its vaddr + original opcode into a uinst_info and
+ * push it onto the hlist passed via @data.  Allocation failures are
+ * skipped (uinst_info_create logs them).
+ * NOTE(review): "creare" looks like a typo of "create" — rename would
+ * need the caller in rm_uprobes_child() updated too.
+ */
+static void func_uinst_creare(struct us_ip *ip, void *data)
+{
+ struct hlist_head *head = (struct hlist_head *)data;
+ struct uprobe *up;
-static void recover_child(struct task_struct *child_task,
- struct sspt_proc *proc)
+ up = probe_info_get_uprobe(ip->desc->type, ip);
+ if (up) {
+ struct uinst_info *uinst;
+ unsigned long vaddr = (unsigned long)up->addr;
+
+ uinst = uinst_info_create(vaddr, up->opcode);
+ if (uinst)
+ hlist_add_head(&uinst->hlist, head);
+ }
+}
+
+/*
+ * Drain @head, restoring the original opcode in @child for every
+ * uinst_info snapshot, then free each record.
+ */
+static void disarm_for_task(struct task_struct *child, struct hlist_head *head)
{
- sspt_proc_uninstall(proc, child_task, US_DISARM);
- swap_disarm_urp_inst_for_task(current, child_task);
+ struct uinst_info *uinst;
+ struct hlist_node *tmp;
+ DECLARE_NODE_PTR_FOR_HLIST(node);
+
+ swap_hlist_for_each_entry_safe(uinst, node, tmp, head, hlist) {
+ uinst_info_disarm(uinst, child);
+ hlist_del(&uinst->hlist);
+ uinst_info_destroy(uinst);
+ }
}
-static void rm_uprobes_child(struct task_struct *task)
+struct clean_data {
+ struct task_struct *task;
+
+ struct hlist_head head;
+ struct hlist_head rhead;
+};
+
+static atomic_t rm_uprobes_child_cnt = ATOMIC_INIT(0);
+
+/*
+ * Jumper callback installed by rm_uprobes_child(): runs after
+ * copy_process() returns, disarming the snapshotted uprobes and
+ * uretprobes in the freshly created child.  Decrements
+ * rm_uprobes_child_cnt, which unregister_cp() spins on.
+ */
+static unsigned long cb_clean_child(void *data)
+{
+ struct clean_data *cdata = (struct clean_data *)data;
+ struct task_struct *child = cdata->task;
+
+ /* disarm up for child */
+ disarm_for_task(child, &cdata->head);
+
+ /* disarm urp for child */
+ urinst_info_put_current_hlist(&cdata->rhead, child);
+
+ atomic_dec(&rm_uprobes_child_cnt);
+ return 0;
+}
+
+/*
+ * Called from the copy_process() kretprobe when the child has its own
+ * mm (no CLONE_VM): under the proc write lock, snapshot current's
+ * uprobes and pending uretprobes, then redirect the kretprobe return
+ * through a jumper (cb_clean_child) which disarms them in the child.
+ * cdata is stack-local; set_jump_cb() copies it (sizeof(cdata)).
+ */
+static void rm_uprobes_child(struct kretprobe_instance *ri,
+ struct pt_regs *regs, struct task_struct *child)
{
struct sspt_proc *proc;
+ struct clean_data cdata = {
+ .task = child,
+ .head = HLIST_HEAD_INIT,
+ .rhead = HLIST_HEAD_INIT
+ };
sspt_proc_write_lock();
+ proc = sspt_proc_get_by_task_no_lock(current);
+ if (proc) {
+ sspt_proc_on_each_ip(proc, func_uinst_creare, (void *)&cdata.head);
+ urinst_info_get_current_hlist(&cdata.rhead, false);
+ }
+ sspt_proc_write_unlock();
- proc = sspt_proc_get_by_task(current);
- if (proc)
- recover_child(task, proc);
+ if (proc) {
+ int ret;
- sspt_proc_write_unlock();
+ /* set jumper */
+ ret = set_jump_cb((unsigned long)ri->ret_addr, regs,
+ cb_clean_child, &cdata, sizeof(cdata));
+ if (ret == 0) {
+ atomic_inc(&rm_uprobes_child_cnt);
+ ri->ret_addr = (unsigned long *)get_jump_addr();
+ }
+ }
}
-static int entry_handler_cp(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
- atomic_inc(©_process_cnt);
+static atomic_t pre_handler_cp_cnt = ATOMIC_INIT(0);
+
+/*
+ * Jumper callback set by pre_handler_cp(): if a stop is in progress,
+ * release current's probes; always balances pre_handler_cp_cnt so the
+ * unregister path can drain outstanding jumpers.
+ */
+static unsigned long cp_cb(void *data)
+{
if (atomic_read(&stop_flag))
call_mm_release(current);
+ atomic_dec(&pre_handler_cp_cnt);
+ return 0;
+}
+
+/*
+ * Kprobe pre-handler on copy_process(): when the module is stopping
+ * (stop_flag set) and current is a user task, install a jumper to
+ * cp_cb() so the probe teardown happens outside the kprobe's atomic
+ * context.  set_kjump_cb() failure is logged and swallowed (ret reset
+ * to 0) so the probed function still runs normally.
+ */
+static int pre_handler_cp(struct kprobe *p, struct pt_regs *regs)
+{
+ int ret = 0;
+
+ if (is_kthread(current))
+ goto out;
+
+ if (!atomic_read(&stop_flag))
+ goto out;
+
+ ret = set_kjump_cb(regs, cp_cb, NULL, 0);
+ if (ret < 0) {
+ pr_err("set_kjump_cp, ret=%d\n", ret);
+ ret = 0;
+ } else {
+ atomic_inc(&pre_handler_cp_cnt);
+ }
+out:
+ return ret;
+}
+
+
+static atomic_t copy_process_cnt = ATOMIC_INIT(0);
+
+static int entry_handler_cp(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ atomic_inc(©_process_cnt);
+
return 0;
}
goto out;
if (task->mm != current->mm) { /* check flags CLONE_VM */
- rm_uprobes_child(task);
+ rm_uprobes_child(ri, regs, task);
}
out:
atomic_dec(©_process_cnt);
.handler = ret_handler_cp,
};
+static struct kprobe cp_kprobe = {
+ .pre_handler = pre_handler_cp
+};
+
static int register_cp(void)
{
int ret;
- ret = swap_register_kretprobe(&cp_kretprobe);
+
+ ret = swap_register_kprobe(&cp_kprobe);
if (ret)
- printk(KERN_INFO
- "swap_register_kretprobe(copy_process) ret=%d!\n", ret);
+ pr_err("swap_register_kprobe(copy_process) ret=%d!\n", ret);
+
+ ret = swap_register_kretprobe(&cp_kretprobe);
+ if (ret) {
+ pr_err("swap_register_kretprobe(copy_process) ret=%d!\n", ret);
+ swap_unregister_kprobe(&cp_kprobe);
+ }
return ret;
}
synchronize_sched();
} while (atomic_read(©_process_cnt));
swap_unregister_kretprobe_bottom(&cp_kretprobe);
+ swap_unregister_kprobe(&cp_kprobe);
+
+ do {
+ synchronize_sched();
+ } while (atomic_read(&rm_uprobes_child_cnt)
+ || atomic_read(&pre_handler_cp_cnt));
}
******************************************************************************
*/
+static atomic_t mm_release_cnt = ATOMIC_INIT(0);
+
+/*
+ * Jumper callback installed by mr_pre_handler(); runs in the context
+ * of the task calling mm_release().  For a dying thread (tgid != pid)
+ * it discards that thread's pending uretprobe instances; for the group
+ * leader it uninstalls probes via call_mm_release().
+ *
+ * Every exit path must release mmap_sem (if taken) and decrement
+ * mm_release_cnt — unregister_mr() spins on that counter, so the
+ * original early returns leaked both the rwsem and the count.
+ */
+static unsigned long mr_cb(void *data)
+{
+ struct task_struct *task = *(struct task_struct **)data;
+ struct mm_struct *mm = task->mm;
+
+ if (mm == NULL) {
+ pr_err("mm is NULL\n");
+ goto out;
+ }
+
+ /* TODO: this lock for synchronizing to disarm urp */
+ down_write(&mm->mmap_sem);
+ if (task->tgid != task->pid) {
+ struct sspt_proc *proc;
+ struct hlist_head head = HLIST_HEAD_INIT;
+
+ if (task != current) {
+ pr_err("call mm_release in isn't current context\n");
+ goto unlock;
+ }
+
+ /* if the thread is killed we need to discard pending
+ * uretprobe instances which have not triggered yet */
+ sspt_proc_write_lock();
+ proc = sspt_proc_get_by_task_no_lock(task);
+ if (proc) {
+ urinst_info_get_current_hlist(&head, true);
+ }
+ sspt_proc_write_unlock();
+
+ if (proc) {
+ /* disarm urp for task */
+ urinst_info_put_current_hlist(&head, task);
+ }
+ } else {
+ call_mm_release(task);
+ }
+unlock:
+ up_write(&mm->mmap_sem);
+out:
+ atomic_dec(&mm_release_cnt);
+
+ return 0;
+}
+
/* Detects when target process removes IPs. */
static int mr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
+ int ret = 0;
struct task_struct *task = (struct task_struct *)swap_get_karg(regs, 0);
if (is_kthread(task))
goto out;
- if (task->tgid != task->pid) {
- /* if the thread is killed we need to discard pending
- * uretprobe instances which have not triggered yet */
- swap_discard_pending_uretprobes(task);
- goto out;
+ /* defer the real work to mr_cb() via a jumper: it then runs in the
+ * context of the task calling mm_release(), outside kprobe atomic
+ * context; failure is logged and swallowed (ret reset to 0) */
+ ret = set_kjump_cb(regs, mr_cb, (void *)&task, sizeof(task));
+ if (ret < 0) {
+ pr_err("mr_pre_handler: ret=%d\n", ret);
+ ret = 0;
+ } else {
+ atomic_inc(&mm_release_cnt);
}
- call_mm_release(task);
out:
- return 0;
+ return ret;
}
static struct kprobe mr_kprobe = {
static void unregister_mr(void)
{
swap_unregister_kprobe(&mr_kprobe);
+ do {
+ synchronize_sched();
+ } while (atomic_read(&mm_release_cnt));
}
sspt_proc_write_lock();
- proc = sspt_proc_get_by_task(task);
+ proc = sspt_proc_get_by_task_no_lock(task);
if (proc)
__remove_unmap_probes(proc, umd);
cp_kretprobe.kp.addr = (kprobe_opcode_t *)swap_ksyms_substr(sym);
if (cp_kretprobe.kp.addr == NULL)
goto not_found;
+ cp_kprobe.addr = cp_kretprobe.kp.addr;
sym = "mm_release";
mr_kprobe.addr = (kprobe_opcode_t *)swap_ksyms(sym);
{
struct img_file *file;
- file = kmalloc(sizeof(*file), GFP_KERNEL);
+ file = kmalloc(sizeof(*file), GFP_ATOMIC);
+ if (file == NULL) {
+ pr_err("%s: failed to allocate memory\n", __func__);
+ return NULL;
+ }
+
file->dentry = dentry;
INIT_LIST_HEAD(&file->ip_list);
INIT_LIST_HEAD(&file->list);
}
ip = create_img_ip(addr, pd);
+ if (ip == NULL)
+ return -ENOMEM;
img_add_ip_by_list(file, ip);
return 0;
{
struct img_proc *proc;
- proc = kmalloc(sizeof(*proc), GFP_KERNEL);
+ proc = kmalloc(sizeof(*proc), GFP_ATOMIC);
if (proc) {
INIT_LIST_HEAD(&proc->file_list);
rwlock_init(&proc->rwlock);
{
struct sspt_file *file;
struct img_file *i_file;
- struct img_ip *i_ip;
read_lock(&i_proc->rwlock);
list_for_each_entry(i_file, &i_proc->file_list, list) {
file = sspt_proc_find_file_or_new(proc, i_file->dentry);
+ if (file) {
+ struct img_ip *i_ip;
- list_for_each_entry(i_ip, &i_file->ip_list, list)
- sspt_file_add_ip(file, i_ip);
+ list_for_each_entry(i_ip, &i_file->ip_list, list)
+ sspt_file_add_ip(file, i_ip);
+ }
}
read_unlock(&i_proc->rwlock);
}
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/namei.h>
-
+#include <linux/mman.h>
+#include <linux/spinlock.h>
#include "pf_group.h"
#include "proc_filters.h"
#include "../sspt/sspt_filter.h"
+#include "../us_manager_common.h"
#include <us_manager/img/img_proc.h>
#include <us_manager/img/img_file.h>
#include <us_manager/img/img_ip.h>
struct pfg_msg_cb *msg_cb;
atomic_t usage;
- /* TODO: proc_list*/
+ spinlock_t pl_lock; /* for proc_list */
struct list_head proc_list;
};
/* struct pl_struct */
static struct pl_struct *create_pl_struct(struct sspt_proc *proc)
{
- struct pl_struct *pls = kmalloc(sizeof(*pls), GFP_KERNEL);
+ struct pl_struct *pls = kmalloc(sizeof(*pls), GFP_ATOMIC);
- INIT_LIST_HEAD(&pls->list);
- pls->proc = proc;
+ if (pls) {
+ INIT_LIST_HEAD(&pls->list);
+ pls->proc = sspt_proc_get(proc);
+ }
return pls;
}
static void free_pl_struct(struct pl_struct *pls)
{
+ sspt_proc_put(pls->proc);
kfree(pls);
}
-
-static void add_pl_struct(struct pf_group *pfg, struct pl_struct *pls)
-{
- list_add(&pls->list, &pfg->proc_list);
-}
-
-static void del_pl_struct(struct pl_struct *pls)
-{
- list_del(&pls->list);
-}
-
-static struct pl_struct *find_pl_struct(struct pf_group *pfg,
- struct task_struct *task)
-{
- struct pl_struct *pls;
-
- list_for_each_entry(pls, &pfg->proc_list, list) {
- if (pls->proc->tgid == task->tgid)
- return pls;
- }
-
- return NULL;
-}
/* struct pl_struct */
-static struct pf_group *create_pfg(void)
+static struct pf_group *pfg_create(void)
{
- struct pf_group *pfg = kmalloc(sizeof(*pfg), GFP_KERNEL);
+ struct pf_group *pfg = kmalloc(sizeof(*pfg), GFP_ATOMIC);
if (pfg == NULL)
return NULL;
INIT_LIST_HEAD(&pfg->list);
memset(&pfg->filter, 0, sizeof(pfg->filter));
+ spin_lock_init(&pfg->pl_lock);
INIT_LIST_HEAD(&pfg->proc_list);
pfg->msg_cb = NULL;
atomic_set(&pfg->usage, 1);
return NULL;
}
+/*
+ * Destroy a filter group: free its image-process data and filter,
+ * detach every tracked proc (safe iteration — free_pl_struct() drops
+ * the proc reference and frees the node), then free the group itself.
+ * Called from put_pf_group() once the usage count hits zero.
+ */
-static void free_pfg(struct pf_group *pfg)
+static void pfg_free(struct pf_group *pfg)
{
- struct pl_struct *pl;
+ struct pl_struct *pl, *n;
free_img_proc(pfg->i_proc);
free_pf(&pfg->filter);
- list_for_each_entry(pl, &pfg->proc_list, list)
+ list_for_each_entry_safe(pl, n, &pfg->proc_list, list) {
sspt_proc_del_filter(pl->proc, pfg);
+ free_pl_struct(pl);
+ }
+
kfree(pfg);
}
+/*
+ * Track @proc in @pfg's proc_list.  create_pl_struct() takes a proc
+ * reference (sspt_proc_get), released later by free_pl_struct().
+ * The list itself is guarded by pfg->pl_lock.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int pfg_add_proc(struct pf_group *pfg, struct sspt_proc *proc)
+{
+ struct pl_struct *pls;
+
+ pls = create_pl_struct(proc);
+ if (pls == NULL)
+ return -ENOMEM;
+
+ spin_lock(&pfg->pl_lock);
+ list_add(&pls->list, &pfg->proc_list);
+ spin_unlock(&pfg->pl_lock);
+
+ return 0;
+}
+
+
/* called with pfg_list_lock held */
-static void add_pfg_by_list(struct pf_group *pfg)
+static void pfg_add_to_list(struct pf_group *pfg)
{
list_add(&pfg->list, &pfg_list);
}
/* called with pfg_list_lock held */
-static void del_pfg_by_list(struct pf_group *pfg)
+static void pfg_del_from_list(struct pf_group *pfg)
{
list_del(&pfg->list);
}
static void first_install(struct task_struct *task, struct sspt_proc *proc)
{
+ sspt_proc_priv_create(proc);
+
down_write(&task->mm->mmap_sem);
sspt_proc_on_each_filter(proc, msg_info, NULL);
sspt_proc_install(proc);
}
}
- pfg = create_pfg();
+ pfg = pfg_create();
if (pfg == NULL)
goto unlock;
set_pf_by_dentry(&pfg->filter, dentry, priv);
- add_pfg_by_list(pfg);
+ pfg_add_to_list(pfg);
unlock:
write_unlock(&pfg_list_lock);
}
}
- pfg = create_pfg();
+ pfg = pfg_create();
if (pfg == NULL)
goto unlock;
set_pf_by_tgid(&pfg->filter, tgid, priv);
- add_pfg_by_list(pfg);
+ pfg_add_to_list(pfg);
unlock:
write_unlock(&pfg_list_lock);
}
}
- pfg = create_pfg();
+ pfg = pfg_create();
if (pfg == NULL)
goto unlock;
ret = set_pf_by_comm(&pfg->filter, comm, priv);
if (ret) {
printk(KERN_ERR "ERROR: set_pf_by_comm, ret=%d\n", ret);
- free_pfg(pfg);
+ pfg_free(pfg);
pfg = NULL;
goto unlock;
}
- add_pfg_by_list(pfg);
+ pfg_add_to_list(pfg);
unlock:
write_unlock(&pfg_list_lock);
return pfg;
}
}
- pfg = create_pfg();
+ pfg = pfg_create();
if (pfg == NULL)
goto unlock;
set_pf_dumb(&pfg->filter, priv);
- add_pfg_by_list(pfg);
+ pfg_add_to_list(pfg);
unlock:
write_unlock(&pfg_list_lock);
{
if (atomic_dec_and_test(&pfg->usage)) {
write_lock(&pfg_list_lock);
- del_pfg_by_list(pfg);
+ pfg_del_from_list(pfg);
write_unlock(&pfg_list_lock);
- free_pfg(pfg);
+ pfg_free(pfg);
}
}
EXPORT_SYMBOL_GPL(put_pf_group);
return ret;
}
-static int pfg_add_proc(struct pf_group *pfg, struct sspt_proc *proc)
-{
- struct pl_struct *pls;
-
- pls = create_pl_struct(proc);
- if (pls == NULL)
- return -ENOMEM;
-
- add_pl_struct(pfg, pls);
-
- return 0;
-}
-
enum pf_inst_flag {
PIF_NONE,
PIF_FIRST,
flag = PIF_FIRST;
}
- if (proc && sspt_proc_is_filter_new(proc, pfg)) {
- img_proc_copy_to_sspt(pfg->i_proc, proc);
- sspt_proc_add_filter(proc, pfg);
- pfg_add_proc(pfg, proc);
- flag = flag == PIF_FIRST ? flag : PIF_ADD_PFG;
+ if (proc) {
+ write_lock(&proc->filter_lock);
+ if (sspt_proc_is_filter_new(proc, pfg)) {
+ img_proc_copy_to_sspt(pfg->i_proc, proc);
+ sspt_proc_add_filter(proc, pfg);
+ pfg_add_proc(pfg, proc);
+ flag = flag == PIF_FIRST ? flag : PIF_ADD_PFG;
+ }
+ write_unlock(&proc->filter_lock);
}
}
read_unlock(&pfg_list_lock);
case PIF_FIRST:
case PIF_ADD_PFG:
proc = sspt_proc_get_by_task(task);
- first_install(task, proc);
+ if (proc)
+ first_install(task, proc);
break;
case PIF_NONE:
case PIF_FIRST:
case PIF_ADD_PFG:
proc = sspt_proc_get_by_task(task);
- first_install(task, proc);
+ if (proc)
+ first_install(task, proc);
break;
case PIF_SECOND:
proc = sspt_proc_get_by_task(task);
- subsequent_install(task, proc, page_addr);
+ if (proc)
+ subsequent_install(task, proc, page_addr);
break;
case PIF_NONE:
void uninstall_proc(struct sspt_proc *proc)
{
struct task_struct *task = proc->task;
- struct pf_group *pfg;
- struct pl_struct *pls;
- read_lock(&pfg_list_lock);
- list_for_each_entry(pfg, &pfg_list, list) {
- pls = find_pl_struct(pfg, task);
- if (pls) {
- del_pl_struct(pls);
- free_pl_struct(pls);
- }
- }
- read_unlock(&pfg_list_lock);
-
- task_lock(task);
- BUG_ON(task->mm == NULL);
sspt_proc_uninstall(proc, task, US_UNREGS_PROBE);
- task_unlock(task);
-
- sspt_proc_del_all_filters(proc);
- sspt_proc_free(proc);
+ sspt_proc_cleanup(proc);
}
/**
struct sspt_proc *proc;
sspt_proc_write_lock();
+ proc = sspt_proc_get_by_task_no_lock(task);
+ if (proc)
+ list_del(&proc->list);
+ sspt_proc_write_unlock();
- proc = sspt_proc_get_by_task(task);
if (proc)
- /* TODO: uninstall_proc - is not atomic context */
uninstall_proc(proc);
-
- sspt_proc_write_unlock();
}
/**
/* TODO: to be implemented */
}
-static void on_each_uninstall_proc(struct sspt_proc *proc, void *data)
-{
- uninstall_proc(proc);
-}
-
/**
* @brief Uninstall probes from all processes
*
*/
void uninstall_all(void)
{
+ struct list_head *proc_list = sspt_proc_list();
+
sspt_proc_write_lock();
- on_each_proc_no_lock(on_each_uninstall_proc, NULL);
+ while (!list_empty(proc_list)) {
+ struct sspt_proc *proc;
+ proc = list_first_entry(proc_list, struct sspt_proc, list);
+
+ list_del(&proc->list);
+
+ sspt_proc_write_unlock();
+ uninstall_proc(proc);
+ sspt_proc_write_lock();
+ }
sspt_proc_write_unlock();
}
+/*
+ * on_each_proc callback: pin the proc's task and mm so they stay valid
+ * while probes are manipulated; released by __do_put_proc() (or by the
+ * final sspt_proc_put()).  __mm may be NULL if the mm is going away.
+ */
+static void __do_get_proc(struct sspt_proc *proc, void *data)
+{
+ get_task_struct(proc->task);
+ proc->__task = proc->task;
+ proc->__mm = get_task_mm(proc->task);
+}
+
+/*
+ * on_each_proc callback: drop the references taken by __do_get_proc().
+ * NULLing the fields makes repeated puts (and the fallback release in
+ * sspt_proc_put()) safe.
+ */
+static void __do_put_proc(struct sspt_proc *proc, void *data)
+{
+ if (proc->__mm) {
+ mmput(proc->__mm);
+ proc->__mm = NULL;
+ }
+
+ if (proc->__task) {
+ put_task_struct(proc->__task);
+ proc->__task = NULL;
+ }
+}
+
+/* Pin task and mm of every tracked proc (paired with put_all_procs()). */
+void get_all_procs(void)
+{
+ sspt_proc_read_lock();
+ on_each_proc_no_lock(__do_get_proc, NULL);
+ sspt_proc_read_unlock();
+}
+
+/* Release the task/mm references taken by get_all_procs(). */
+void put_all_procs(void)
+{
+ sspt_proc_read_lock();
+ on_each_proc_no_lock(__do_put_proc, NULL);
+ sspt_proc_read_unlock();
+}
+
/**
* @brief For debug
*
void install_all(void);
void uninstall_all(void);
+void get_all_procs(void);
+void put_all_procs(void);
+
int check_task_on_filters(struct task_struct *task);
void call_page_fault(struct task_struct *task, unsigned long page_addr);
void call_mm_release(struct task_struct *task);
up->addr = (kprobe_opcode_t *)ip->orig_addr;
up->task = ip->page->file->proc->task;
up->sm = ip->page->file->proc->sm;
+ up->atomic_ctx = true;
ret = probe_info_register(ip->desc->type, ip);
if (ret) {
break;
case US_DISARM:
up = probe_info_get_uprobe(ip->desc->type, ip);
- disarm_uprobe(up, task);
+ if (up)
+ disarm_uprobe(up, task);
break;
case US_UNINSTALL:
probe_info_unregister(ip->desc->type, ip, 0);
spin_lock_irqsave(&feature_img_lock, flags);
list_for_each_entry(fi, &feature_img_list, list) {
fd = create_feature_data(fi);
-
- /* add to list */
- list_add(&fd->list, &f->feature_list);
+ if (fd) /* add to list */
+ list_add(&fd->list, &f->feature_list);
}
spin_unlock_irqrestore(&feature_img_lock, flags);
}
*/
struct sspt_file *sspt_file_create(struct dentry *dentry, int page_cnt)
{
+ int i, table_size;
struct sspt_file *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);
- if (obj) {
- int i, table_size;
- INIT_LIST_HEAD(&obj->list);
- obj->proc = NULL;
- obj->dentry = dentry;
- obj->loaded = 0;
- obj->vm_start = 0;
- obj->vm_end = 0;
+ if (obj == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&obj->list);
+ obj->proc = NULL;
+ obj->dentry = dentry;
+ obj->loaded = 0;
+ obj->vm_start = 0;
+ obj->vm_end = 0;
- obj->page_probes_hash_bits = calculation_hash_bits(page_cnt);
- table_size = (1 << obj->page_probes_hash_bits);
+ obj->page_probes_hash_bits = calculation_hash_bits(page_cnt);
+ table_size = (1 << obj->page_probes_hash_bits);
- obj->page_probes_table =
+ obj->page_probes_table =
kmalloc(sizeof(*obj->page_probes_table)*table_size,
GFP_ATOMIC);
- for (i = 0; i < table_size; ++i)
- INIT_HLIST_HEAD(&obj->page_probes_table[i]);
- }
+ if (obj->page_probes_table == NULL)
+ goto err;
+
+ for (i = 0; i < table_size; ++i)
+ INIT_HLIST_HEAD(&obj->page_probes_table[i]);
return obj;
+
+err:
+ kfree(obj);
+ return NULL;
}
/**
if (page == NULL) {
page = sspt_page_create(offset);
- sspt_add_page(file, page);
+ if (page)
+ sspt_add_page(file, page);
}
return page;
probe_info_init(ip->desc->type, ip);
}
+void sspt_file_on_each_ip(struct sspt_file *file,
+ void (*func)(struct us_ip *, void *), void *data)
+{
+ int i;
+ const int table_size = (1 << file->page_probes_hash_bits);
+ struct sspt_page *page;
+ struct hlist_head *head;
+ DECLARE_NODE_PTR_FOR_HLIST(node);
+
+ for (i = 0; i < table_size; ++i) {
+ head = &file->page_probes_table[i];
+ swap_hlist_for_each_entry(page, node, head, hlist)
+ sspt_page_on_each_ip(page, func, data);
+ }
+}
+
/**
* @brief Get sspt_page from sspt_file (look)
*
unsigned long page);
void sspt_file_add_ip(struct sspt_file *file, struct img_ip *img_ip);
+void sspt_file_on_each_ip(struct sspt_file *file,
+ void (*func)(struct us_ip *, void *), void *data);
+
struct sspt_page *sspt_get_page(struct sspt_file *file,
unsigned long offset_addr);
void sspt_put_page(struct sspt_page *page);
{
struct sspt_filter *fl;
- fl = kmalloc(sizeof(*fl), GFP_KERNEL);
+ fl = kmalloc(sizeof(*fl), GFP_ATOMIC);
if (fl == NULL)
return NULL;
INIT_LIST_HEAD(&fl->list);
- list_add(&fl->list, &proc->filter_list);
fl->proc = proc;
fl->pfg = pfg;
return err;
}
+
+void sspt_page_on_each_ip(struct sspt_page *page,
+ void (*func)(struct us_ip *, void *), void *data)
+{
+ struct us_ip *ip;
+
+ spin_lock(&page->lock);
+ list_for_each_entry(ip, &page->ip_list_inst, list)
+ func(ip, data);
+
+ spin_unlock(&page->lock);
+}
enum US_FLAGS flag,
struct task_struct *task);
+void sspt_page_on_each_ip(struct sspt_page *page,
+ void (*func)(struct us_ip *, void *), void *data);
+
#endif /* __SSPT_PAGE__ */
#include <linux/list.h>
#include <us_manager/us_slot_manager.h>
-
static LIST_HEAD(proc_probes_list);
static DEFINE_RWLOCK(sspt_proc_rwlock);
+/*
+ * Expose the global proc list.  (void) matches the header prototype;
+ * an empty () would declare an unspecified-parameter K&R function.
+ * Callers are expected to hold the sspt_proc lock while traversing
+ * (see uninstall_all()).
+ */
+struct list_head *sspt_proc_list(void)
+{
+ return &proc_probes_list;
+}
+
/**
* @brief Global read lock for sspt_proc
*
*/
struct sspt_proc *sspt_proc_create(struct task_struct *task)
{
- struct sspt_proc *proc = kmalloc(sizeof(*proc), GFP_ATOMIC);
+ struct sspt_proc *proc = kzalloc(sizeof(*proc), GFP_ATOMIC);
if (proc) {
proc->feature = sspt_create_feature();
proc->tgid = task->tgid;
proc->task = task->group_leader;
proc->sm = create_sm_us(task);
- proc->first_install = 0;
- proc->private_data = NULL;
INIT_LIST_HEAD(&proc->file_list);
+ rwlock_init(&proc->filter_lock);
INIT_LIST_HEAD(&proc->filter_list);
+ atomic_set(&proc->usage, 1);
+
+ get_task_struct(proc->task);
/* add to list */
list_add(&proc->list, &proc_probes_list);
*/
/* called with sspt_proc_write_lock() */
-void sspt_proc_free(struct sspt_proc *proc)
+void sspt_proc_cleanup(struct sspt_proc *proc)
{
struct sspt_file *file, *n;
- /* delete from list */
- list_del(&proc->list);
+ sspt_proc_del_all_filters(proc);
list_for_each_entry_safe(file, n, &proc->file_list, list) {
list_del(&file->list);
sspt_destroy_feature(proc->feature);
free_sm_us(proc->sm);
- kfree(proc);
+ sspt_proc_put(proc);
+}
+
+/* Take a reference on @proc; paired with sspt_proc_put(). */
+struct sspt_proc *sspt_proc_get(struct sspt_proc *proc)
+{
+ atomic_inc(&proc->usage);
+
+ return proc;
+}
+
+/*
+ * Drop a reference on @proc.  On the last put, release any pinned
+ * __mm/__task left over from get_all_procs(), drop the task reference
+ * taken in sspt_proc_create(), and free the structure.
+ */
+void sspt_proc_put(struct sspt_proc *proc)
+{
+ if (atomic_dec_and_test(&proc->usage)) {
+ if (proc->__mm) {
+ mmput(proc->__mm);
+ proc->__mm = NULL;
+ }
+ if (proc->__task) {
+ put_task_struct(proc->__task);
+ proc->__task = NULL;
+ }
+
+ put_task_struct(proc->task);
+ kfree(proc);
+ }
+}
+
+struct sspt_proc *sspt_proc_get_by_task(struct task_struct *task)
+{
+ struct sspt_proc *proc;
+
+ sspt_proc_read_lock();
+ proc = sspt_proc_get_by_task_no_lock(task);
+ sspt_proc_read_unlock();
+
+ return proc;
}
+EXPORT_SYMBOL_GPL(sspt_proc_get_by_task);
/**
* @brief Get sspt_proc by task
* @param task Pointer on the task_struct struct
* @return Pointer on the sspt_proc struct
*/
-struct sspt_proc *sspt_proc_get_by_task(struct task_struct *task)
+struct sspt_proc *sspt_proc_get_by_task_no_lock(struct task_struct *task)
{
struct sspt_proc *proc, *tmp;
return NULL;
}
-EXPORT_SYMBOL_GPL(sspt_proc_get_by_task);
+EXPORT_SYMBOL_GPL(sspt_proc_get_by_task_no_lock);
/**
* @brief Call func() on each proc (no lock)
*/
struct sspt_proc *sspt_proc_get_by_task_or_new(struct task_struct *task)
{
- struct sspt_proc *proc = sspt_proc_get_by_task(task);
+ struct sspt_proc *proc;
+
+ sspt_proc_write_lock();
+ proc = sspt_proc_get_by_task_no_lock(task);
if (proc == NULL)
proc = sspt_proc_create(task);
+ sspt_proc_write_unlock();
return proc;
}
void sspt_proc_free_all(void)
{
struct sspt_proc *proc, *n;
+
list_for_each_entry_safe(proc, n, &proc_probes_list, list) {
- sspt_proc_del_all_filters(proc);
- sspt_proc_free(proc);
+ list_del(&proc->list);
+ sspt_proc_cleanup(proc);
}
}
file = sspt_proc_find_file(proc, dentry);
if (file == NULL) {
file = sspt_file_create(dentry, 10);
- sspt_proc_add_file(proc, file);
+ if (file)
+ sspt_proc_add_file(proc, file);
}
return file;
*/
void sspt_proc_add_filter(struct sspt_proc *proc, struct pf_group *pfg)
{
- sspt_filter_create(proc, pfg);
+ struct sspt_filter *f;
+
+ f = sspt_filter_create(proc, pfg);
+ if (f)
+ list_add(&f->list, &proc->filter_list);
}
/**
{
struct sspt_filter *fl, *tmp;
+ write_lock(&proc->filter_lock);
list_for_each_entry_safe(fl, tmp, &proc->filter_list, list) {
if (fl->pfg == pfg) {
list_del(&fl->list);
sspt_filter_free(fl);
}
}
+ write_unlock(&proc->filter_lock);
}
/**
{
struct sspt_filter *fl, *tmp;
+ write_lock(&proc->filter_lock);
list_for_each_entry_safe(fl, tmp, &proc->filter_list, list) {
list_del(&fl->list);
sspt_filter_free(fl);
}
+ write_unlock(&proc->filter_lock);
}
/**
func(fl, data);
}
+void sspt_proc_on_each_ip(struct sspt_proc *proc,
+ void (*func)(struct us_ip *, void *), void *data)
+{
+ struct sspt_file *file;
+
+ list_for_each_entry(file, &proc->file_list, list)
+ sspt_file_on_each_ip(file, func, data);
+}
+
static void is_send_event(struct sspt_filter *f, void *data)
{
bool *is_send = (bool *)data;
{
bool is_send = false;
+ /* FIXME: add read lock (deadlock in sampler) */
sspt_proc_on_each_filter(proc, is_send_event, (void *)&is_send);
return is_send;
}
+
+
+static struct sspt_proc_cb *proc_cb;
+
+int sspt_proc_cb_set(struct sspt_proc_cb *cb)
+{
+ if (cb && proc_cb)
+ return -EBUSY;
+
+ proc_cb = cb;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sspt_proc_cb_set);
+
+void sspt_proc_priv_create(struct sspt_proc *proc)
+{
+ if (proc_cb && proc_cb->priv_create)
+ proc->private_data = proc_cb->priv_create(proc);
+}
+
+void sspt_proc_priv_destroy(struct sspt_proc *proc)
+{
+ if (proc->first_install && proc_cb && proc_cb->priv_destroy)
+ proc_cb->priv_destroy(proc, proc->private_data);
+}
struct task_struct;
struct pf_group;
struct sspt_filter;
+struct us_ip;
/** Flags for sspt_*_uninstall() */
enum US_FLAGS {
struct list_head list; /**< For global process list */
pid_t tgid; /**< Thread group ID */
struct task_struct *task; /**< Ptr to the task */
+ struct mm_struct *__mm;
+ struct task_struct *__task;
+ unsigned long r_state_addr; /**< address of r_state */
struct slot_manager *sm; /**< Ptr to the manager slot */
struct list_head file_list; /**< For sspt_file */
+ rwlock_t filter_lock;
struct list_head filter_list; /**< Filter list */
unsigned first_install:1; /**< Install flag */
struct sspt_feature *feature; /**< Ptr to the feature */
+ atomic_t usage;
+
+ /* FIXME: for preload (remove those fields) */
void *private_data; /**< Process private data */
};
+struct sspt_proc_cb {
+ void *(*priv_create)(struct sspt_proc *);
+ void (*priv_destroy)(struct sspt_proc *, void *);
+};
+
+
+struct list_head *sspt_proc_list(void);
struct sspt_proc *sspt_proc_create(struct task_struct *task);
-void sspt_proc_free(struct sspt_proc *proc);
+void sspt_proc_cleanup(struct sspt_proc *proc);
+struct sspt_proc *sspt_proc_get(struct sspt_proc *proc);
+void sspt_proc_put(struct sspt_proc *proc);
void on_each_proc_no_lock(void (*func)(struct sspt_proc *, void *),
void *data);
void on_each_proc(void (*func)(struct sspt_proc *, void *), void *data);
struct sspt_proc *sspt_proc_get_by_task(struct task_struct *task);
+struct sspt_proc *sspt_proc_get_by_task_no_lock(struct task_struct *task);
struct sspt_proc *sspt_proc_get_by_task_or_new(struct task_struct *task);
void sspt_proc_free_all(void);
void (*func)(struct sspt_filter *, void *),
void *data);
+void sspt_proc_on_each_ip(struct sspt_proc *proc,
+ void (*func)(struct us_ip *, void *), void *data);
+
bool sspt_proc_is_send_event(struct sspt_proc *proc);
+int sspt_proc_cb_set(struct sspt_proc_cb *cb);
+void sspt_proc_priv_create(struct sspt_proc *proc);
+void sspt_proc_priv_destroy(struct sspt_proc *proc);
+
#endif /* __SSPT_PROC__ */
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/stop_machine.h>
#include "pf/pf_group.h"
#include "sspt/sspt_proc.h"
#include "probes/probe_info_new.h"
static enum status_type status = ST_OFF;
/**
 * @brief stop_machine() callback used by do_usm_stop().
 *
 * Calls get_all_procs() while the machine is stopped so the proc list
 * cannot change concurrently. @data is unused.
 *
 * @return Always 0.
 */
static int __do_usm_stop(void *data)
{
	get_all_procs();
	return 0;
}
+
static void do_usm_stop(void)
{
- exec_cbs(STOP_CB);
+ int ret;
+ exec_cbs(STOP_CB);
unregister_helper_top();
+
+ ret = stop_machine(__do_usm_stop, NULL, NULL);
+ if (ret)
+ printk("do_usm_stop failed: %d\n", ret);
+
uninstall_all();
unregister_helper_bottom();
sspt_proc_free_all();
+ exec_cbs(STOP_CB_TD);
+
}
static int do_usm_start(void)
{
struct sspt_proc *proc;
- proc = sspt_proc_get_by_task(task);
+ /* FIXME: add read lock (deadlock in sampler) */
+ proc = sspt_proc_get_by_task_no_lock(task);
if (proc)
return sspt_proc_is_send_event(proc);
struct slot_manager *create_sm_us(struct task_struct *task)
{
struct slot_manager *sm = kmalloc(sizeof(*sm), GFP_ATOMIC);
+
+ if (sm == NULL)
+ return NULL;
+
sm->slot_size = UPROBES_TRAMP_LEN;
sm->alloc = sm_alloc_us;
sm->free = sm_free_us;
static ssize_t write_app_info(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
- int ret;
+ int ret = 0;
char *buf, *path, *id;
int n;
}
web_prof_data_set(path, id);
- sprintf(app_info, "%s\n", buf);
+ snprintf(app_info, sizeof(app_info), "%s\n", buf);
free_app_info:
kfree(id);
ssize_t ret;
buf = kmalloc(len + 2, GFP_KERNEL);
- memcpy(buf, name, len);
+ if (buf == NULL)
+ return -ENOMEM;
+ memcpy(buf, name, len);
buf[len] = '\0';
buf[len + 1] = '\n';
switch (fmt[fmt_i]) {
case 'b': /* 1 byte(bool) */
- if (len < 1)
- return -ENOMEM;
*buf = (char)!!get_arg(regs, i);
buf += 1;
len -= 1;
break;
case 'c': /* 1 byte(char) */
- if (len < 1)
- return -ENOMEM;
*buf = (char)get_arg(regs, i);
buf += 1;
len -= 1;
};
-static const char webapp_path[] = "/usr/bin/WebProcess";
+/* TODO: configure this from outside (using debugfs) */
+static const char webapp_path[] = "/usr/bin/wrt_launchpad_daemon";
static const char ewebkit_path[] = "/usr/lib/libewebkit2.so";