*/
/* For all memory allocation/deallocation operations, except buffer memory
- * allocation/deallocation should be used
+ * allocation/deallocation should be used
* memory_allocation(size_t memory_size)
* memory_free(void *ptr)
* defines.
};
/* Pointers array. Points to busy buffers */
-static struct swap_subbuffer **queue_busy = NULL;
+static struct swap_subbuffer **queue_busy;
/* Store last busy element */
static unsigned int queue_busy_last_element;
/* Subbuffers count */
-static unsigned int queue_subbuffer_count = 0;
+static unsigned int queue_subbuffer_count;
/* One subbuffer size */
-static size_t queue_subbuffer_size = 0;
+static size_t queue_subbuffer_size;
/* Busy list sync */
static struct sync_t buffer_busy_sync = {
};
/* Memory pages count in one subbuffer */
-static int pages_order_in_subbuffer = 0;
+static int pages_order_in_subbuffer;
/**
* @brief Allocates memory for swap_subbuffer structures and subbuffers.
sync_init(&buffer_busy_sync);
/* Memory allocation for queue_busy */
- queue_busy = memory_allocation(sizeof(*queue_busy) * queue_subbuffer_count);
+ queue_busy =
+ memory_allocation(sizeof(*queue_busy) * queue_subbuffer_count);
if (!queue_busy) {
result = -E_SB_NO_MEM_QUEUE_BUSY;
/* Memory allocation for swap_subbuffer structures */
/* Allocation for first structure. */
- write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));
+ write_queue.start_ptr =
+ memory_allocation(sizeof(*write_queue.start_ptr));
if (!write_queue.start_ptr) {
result = -E_SB_NO_MEM_BUFFER_STRUCT;
write_queue.end_ptr->next_in_queue = NULL;
write_queue.end_ptr->full_buffer_part = 0;
- write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
+ write_queue.end_ptr->data_buffer =
+ buffer_allocation(queue_subbuffer_size);
if (!write_queue.end_ptr->data_buffer) {
print_err("Cannot allocate memory for buffer 1\n");
result = -E_SB_NO_MEM_DATA_BUFFER;
sync_init(&write_queue.end_ptr->buffer_sync);
/* Buffer initialization */
- memset(buffer_address(write_queue.end_ptr->data_buffer), 0, queue_subbuffer_size);
+ memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
+ queue_subbuffer_size);
/* Allocation for other structures. */
for (i = 1; i < queue_subbuffer_count; i++) {
write_queue.end_ptr->next_in_queue =
- memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
+ memory_allocation(
+ sizeof(*write_queue.end_ptr->next_in_queue));
if (!write_queue.end_ptr->next_in_queue) {
result = -E_SB_NO_MEM_BUFFER_STRUCT;
goto buffer_allocation_error_free;
write_queue.end_ptr->next_in_queue = NULL;
write_queue.end_ptr->full_buffer_part = 0;
- write_queue.end_ptr->data_buffer =
+ write_queue.end_ptr->data_buffer =
buffer_allocation(queue_subbuffer_size);
if (!write_queue.end_ptr->data_buffer) {
result = -E_SB_NO_MEM_DATA_BUFFER;
for (j = 0; j < allocated_structs; j++) {
clean_tmp_struct = write_queue.start_ptr;
if (allocated_buffers) {
- buffer_free(clean_tmp_struct->data_buffer, queue_subbuffer_size);
+ buffer_free(clean_tmp_struct->data_buffer,
+ queue_subbuffer_size);
allocated_buffers--;
}
if (write_queue.start_ptr != write_queue.end_ptr)
- write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
+ write_queue.start_ptr =
+ write_queue.start_ptr->next_in_queue;
memory_free(clean_tmp_struct);
}
write_queue.end_ptr = NULL;
{
struct swap_subbuffer *buffer = read_queue.start_ptr;
- /* Check if there are some subbuffers in busy list. If so - return error */
+ /* Check if there are some subbuffers in busy list.
+ * If so - return error */
if (get_busy_buffers_count())
return -E_SB_UNRELEASED_BUFFERS;
/* Lock read sync primitive */
sync_lock(&read_queue.queue_sync);
- /* Set all subbuffers in read list to write list and reinitialize them */
+ /* Set all subbuffers in read list to write list
+ * and reinitialize them */
while (read_queue.start_ptr) {
- /* Lock buffer sync primitive to prevent writing to buffer if it had
- * been selected for writing, but still wasn't wrote. */
+ /* Lock buffer sync primitive to prevent writing to buffer if it
+ * had been selected for writing, but still wasn't written. */
sync_lock(&buffer->buffer_sync);
buffer = read_queue.start_ptr;
/* If we reached end of the list */
- if (read_queue.start_ptr == read_queue.end_ptr) {
+ if (read_queue.start_ptr == read_queue.end_ptr)
read_queue.end_ptr = NULL;
- }
+
read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
/* Reinit full buffer part */
static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
size_t size)
{
- /* XXX Think about checking full_buffer_part for correctness
- * (<queue_subbuffer_size). It should be true, but if isn't (due to sources
- * chaning, etc.) this function should be true! */
- return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
+ /* XXX Think about checking full_buffer_part for correctness
+ * (<queue_subbuffer_size). It should be true, but if it isn't (due to
+ * sources changing, etc.) this function should be true! */
+ return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ?
+ 1 : 0;
}
static void next_queue_element(struct queue_t *queue)
int result = 0;
add_to_read_list(subbuffer);
- // TODO Handle ret value
+ /* TODO Handle ret value */
result = swap_buffer_callback(subbuffer);
return result;
* @brief Get first writable subbuffer from write list.
*
* @param size Minimum amount of free space in subbuffer.
- * @param[out] ptr_to_write Pointer to the variable where pointer to the beginning
- * of memory for writing should be stored.
+ * @param[out] ptr_to_write Pointer to the variable where pointer to the
+ * beginning of memory for writing should be stored.
* @return Found swap_subbuffer.
*/
struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
{
struct swap_subbuffer *result = NULL;
- /* Callbacks are called at the end of the function to prevent deadlocks */
+ /* Callbacks are called at the end of the function
+ * to prevent deadlocks */
struct queue_t callback_queue = {
.start_ptr = NULL,
.end_ptr = NULL,
if (is_buffer_enough(write_queue.start_ptr, size)) {
result = write_queue.start_ptr;
- *ptr_to_write = (void *)((unsigned long)
- (buffer_address(result->data_buffer)) +
- result->full_buffer_part);
+ *ptr_to_write =
+ (void *)((unsigned long)
+ (buffer_address(result->data_buffer)) +
+ result->full_buffer_part);
- /* Add data size to full_buffer_part. Very important to do it in
+ /* Add data size to full_buffer_part.
+ * Very important to do it in
* write_queue.queue_sync spinlock */
write_queue.start_ptr->full_buffer_part += size;
- /* Lock rw sync. Should be unlocked in swap_buffer_write() */
+ /* Lock rw sync.
+ * Should be unlocked in swap_buffer_write() */
sync_lock_no_flags(&result->buffer_sync);
break;
/* This subbuffer is not enough => it goes to read list */
callback_queue.end_ptr = NULL;
tmp_buffer = callback_queue.start_ptr;
- callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;
+ callback_queue.start_ptr =
+ callback_queue.start_ptr->next_in_queue;
add_to_read_list_with_callback(tmp_buffer);
}
*/
int remove_from_busy_list(struct swap_subbuffer *subbuffer)
{
- int result = -E_SB_NO_SUBBUFFER_IN_BUSY; // For sanitization
+ int result = -E_SB_NO_SUBBUFFER_IN_BUSY; /* For sanitization */
int i;
/* Lock busy list sync primitive */
while (write_queue.start_ptr &&
write_queue.start_ptr->full_buffer_part) {
- /* Lock buffer sync primitive to prevent writing to buffer if it had
- * been selected for writing, but still wasn't wrote. */
+ /* Lock buffer sync primitive to prevent writing to buffer if it
+ * had been selected for writing, but still wasn't written. */
sync_lock(&buffer->buffer_sync);
buffer = write_queue.start_ptr;
*/
int get_pages_count_in_subbuffer(void)
{
-/* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
- return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;
+/* Return 1 if pages order is 0,
+ * or 2 to the power of pages_order_in_subbuffer otherwise */
+ return (pages_order_in_subbuffer) ?
+ 2 << (pages_order_in_subbuffer - 1) : 1;
}
#include "buffer_description.h"
-int buffer_queue_allocation(size_t subbuffer_size, unsigned int subbuffers_count);
+int buffer_queue_allocation(size_t subbuffer_size,
+ unsigned int subbuffers_count);
void buffer_queue_free(void);
int buffer_queue_reset(void);
void buffer_queue_flush(void);
typedef int(*subbuffer_callback_type)(void);
/* Callback that is called when full subbuffer appears */
-static subbuffer_callback_type subbuffer_callback = NULL;
+static subbuffer_callback_type subbuffer_callback;
/* One subbuffer size */
-static size_t subbuffers_size = 0;
+static size_t subbuffers_size;
/* Subbuffers count */
-static unsigned int subbuffers_num = 0;
+static unsigned int subbuffers_num;
-static unsigned int enough_writable_bufs = 0;
-static unsigned int min_writable_bufs = 0;
-static int (*low_mem_cb)(void) = NULL;
-static int (*enough_mem_cb)(void) = NULL;
+static unsigned int enough_writable_bufs;
+static unsigned int min_writable_bufs;
+static int (*low_mem_cb)(void);
+static int (*enough_mem_cb)(void);
-static inline int areas_overlap(const void *area1,const void *area2, size_t size)
+static inline int areas_overlap(const void *area1,
+ const void *area2,
+ size_t size)
{
int i;
swap_buffer_status &= ~BUFFER_WORK;
print_debug("status buffer stop = %d\n", swap_buffer_status);
- if ((buf_init->top_threshold > 100) || (buf_init->lower_threshold > 100) ||
+ if ((buf_init->top_threshold > 100) ||
+ (buf_init->lower_threshold > 100) ||
(buf_init->top_threshold < buf_init->lower_threshold))
return -E_SB_WRONG_THRESHOLD;
}
/* Copy data to buffer */
- /* XXX Think of using memmove instead - useless, anyway overlapping means
- * that something went wrong. */
+ /* XXX Think of using memmove instead - useless, anyway overlapping
+ * means that something went wrong. */
memcpy(ptr_to_write, data, size);
result = size;
{
int result;
- if (!subbuffer_callback) {
+ if (!subbuffer_callback)
return -E_SB_NO_CALLBACK;
- }
result = subbuffer_callback();
if (result < 0)
int swap_buffer_init(struct buffer_init_t *buf_init);
/* SWAP Buffer uninitialization function. Call it every time before removing
- * this module.
+ * this module.
* Returns E_SB_SUCCESS (0) on success, otherwise error code. */
int swap_buffer_uninit(void);
/* SWAP Buffer write function. Pass it size of the data and pointer to the data.
- * On success returns number of bytes written (>=0) or error code (<0) otherwise */
-ssize_t swap_buffer_write(void* data, size_t size);
+ * On success returns number of bytes written (>=0) or error code (<0)
+ * otherwise */
+ssize_t swap_buffer_write(void *data, size_t size);
-/* SWAP Buffer get. Put subbuffer pointer to the variable *subbuffer.
+/* SWAP Buffer get. Put subbuffer pointer to the variable *subbuffer.
* Return pages count in subbuffer. */
int swap_buffer_get(struct swap_subbuffer **subbuffer);
/* swap_device driver routines */
static ssize_t swap_device_read(struct file *filp, char __user *buf,
- size_t count, loff_t *f_pos);
+ size_t count, loff_t *f_pos);
static long swap_device_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
+ unsigned long arg);
static ssize_t swap_device_splice_read(struct file *filp, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags);
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
/**
* @var swap_device_fops
typedef int(*splice_grow_spd_p_t)(const struct pipe_inode_info *pipe,
struct splice_pipe_desc *spd);
-static splice_to_pipe_p_t splice_to_pipe_p = NULL;
-static splice_grow_spd_p_t splice_grow_spd_p = NULL;
+static splice_to_pipe_p_t splice_to_pipe_p;
+static splice_grow_spd_p_t splice_grow_spd_p;
-static msg_handler_t msg_handler = NULL;
+static msg_handler_t msg_handler;
/* Device numbers */
-static dev_t swap_device_no = 0;
+static dev_t swap_device_no;
/* Device cdev struct */
-static struct cdev *swap_device_cdev = NULL;
+static struct cdev *swap_device_cdev;
/* Device class struct */
-static struct class *swap_device_class = NULL;
+static struct class *swap_device_class;
/* Device device struct */
-static struct device *swap_device_device = NULL;
+static struct device *swap_device_device;
/* Reading tasks queue */
static DECLARE_WAIT_QUEUE_HEAD(swap_device_wait);
* @return Void.
*/
void swap_device_splice_shrink_spd(struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd)
+ struct splice_pipe_desc *spd)
{
if (pipe->buffers <= PIPE_DEF_BUFFERS)
return;
*
* @return 0 on success, negative error code otherwise.
*/
- int swap_device_init(void)
+int swap_device_init(void)
{
int result;
goto init_fail;
}
- /* Creating device class. Using IS_ERR, because class_create returns ERR_PTR
- * on error. */
+ /* Creating device class. Using IS_ERR, because class_create
+ * returns ERR_PTR on error. */
swap_device_class = class_create(THIS_MODULE, SWAP_DEVICE_NAME);
if (IS_ERR(swap_device_class)) {
print_crit("Class creation has failed\n");
}
/* Create device struct */
- swap_device_device = device_create(swap_device_class, NULL, swap_device_no,
- "%s", SWAP_DEVICE_NAME);
+ swap_device_device = device_create(swap_device_class, NULL,
+ swap_device_no,
+ "%s", SWAP_DEVICE_NAME);
if (IS_ERR(swap_device_device)) {
print_crit("Device struct creating has failed\n");
result = -E_SD_DEVICE_CREATE_FAIL;
return 0;
init_fail:
- if (swap_device_cdev) {
+ if (swap_device_cdev)
cdev_del(swap_device_cdev);
- }
- if (swap_device_class) {
+ if (swap_device_class)
class_destroy(swap_device_class);
- }
- if (swap_device_no) {
+ if (swap_device_no)
unregister_chrdev_region(swap_device_no, 1);
- }
return result;
}
}
static ssize_t swap_device_read(struct file *filp, char __user *buf,
- size_t count, loff_t *f_pos)
+ size_t count, loff_t *f_pos)
{
/* Wait queue item that consists current task. It is used to be added in
* swap_device_wait queue if there is no data to be read. */
DEFINE_WAIT(wait);
int result;
- //TODO : Think about spin_locks to prevent reading race condition.
- while((result = driver_to_buffer_next_buffer_to_read()) != E_SD_SUCCESS) {
+ /* TODO: Think about spin_locks to prevent reading race condition. */
+ while ((result =
+ driver_to_buffer_next_buffer_to_read()) != E_SD_SUCCESS) {
- /* Add process to the swap_device_wait queue and set the current task
- * state TASK_INTERRUPTIBLE. If there is any data to be read, then the
- * current task is removed from the swap_device_wait queue and its state
- * is changed to this. */
+ /* Add process to the swap_device_wait queue and set the current
+ * task state TASK_INTERRUPTIBLE. If there is any data to be
+ * read, then the current task is removed from the
+ * swap_device_wait queue and its state is changed to this. */
prepare_to_wait(&swap_device_wait, &wait, TASK_INTERRUPTIBLE);
if (result < 0) {
result = 0;
goto swap_device_read_error;
} else if (result == E_SD_NO_DATA_TO_READ) {
- /* Yes, E_SD_NO_DATA_TO_READ should be positive, cause it's not
- * really an error */
+ /* Yes, E_SD_NO_DATA_TO_READ should be positive,
+ * because it's not really an error */
if (filp->f_flags & O_NONBLOCK) {
result = -EAGAIN;
goto swap_device_read_error;
{
int result;
- switch(cmd) {
- case SWAP_DRIVER_BUFFER_INITIALIZE:
- {
- struct buffer_initialize initialize_struct;
-
- result = copy_from_user(&initialize_struct, (void*)arg,
- sizeof(struct buffer_initialize));
- if (result) {
- break;
- }
-
- if (initialize_struct.size > MAXIMUM_SUBBUFFER_SIZE) {
- print_err("Wrong subbuffer size\n");
- result = -E_SD_WRONG_ARGS;
- break;
- }
+ switch (cmd) {
+ case SWAP_DRIVER_BUFFER_INITIALIZE:
+ {
+ struct buffer_initialize initialize_struct;
- result = driver_to_buffer_initialize(initialize_struct.size,
- initialize_struct.count);
- if (result < 0) {
- print_err("Buffer initialization failed %d\n", result);
- break;
- }
- result = E_SD_SUCCESS;
-
- break;
- }
- case SWAP_DRIVER_BUFFER_UNINITIALIZE:
- {
- result = driver_to_buffer_uninitialize();
- if (result < 0)
- print_err("Buffer uninitialization failed %d\n", result);
+ result = copy_from_user(&initialize_struct, (void *)arg,
+ sizeof(struct buffer_initialize));
+ if (result)
break;
- }
- case SWAP_DRIVER_NEXT_BUFFER_TO_READ:
- {
- /* Use this carefully */
- result = driver_to_buffer_next_buffer_to_read();
- if (result == E_SD_NO_DATA_TO_READ) {
- /* TODO Do what we usually do when there are no subbuffers to
- * read (make daemon sleep ?) */
- }
- break;
- }
- case SWAP_DRIVER_FLUSH_BUFFER:
- {
- result = driver_to_buffer_flush();
+
+ if (initialize_struct.size > MAXIMUM_SUBBUFFER_SIZE) {
+ print_err("Wrong subbuffer size\n");
+ result = -E_SD_WRONG_ARGS;
break;
}
- case SWAP_DRIVER_MSG:
- {
- if (msg_handler) {
- result = msg_handler((void __user *)arg);
- } else {
- print_warn("msg_handler() is not register\n");
- result = -EINVAL;
- }
+
+ result = driver_to_buffer_initialize(initialize_struct.size,
+ initialize_struct.count);
+ if (result < 0) {
+ print_err("Buffer initialization failed %d\n", result);
break;
}
- case SWAP_DRIVER_WAKE_UP:
- {
- swap_device_wake_up_process();
- result = E_SD_SUCCESS;
- break;
+ result = E_SD_SUCCESS;
+
+ break;
+ }
+ case SWAP_DRIVER_BUFFER_UNINITIALIZE:
+ {
+ result = driver_to_buffer_uninitialize();
+ if (result < 0)
+ print_err("Buffer uninitialization failed %d\n",
+ result);
+ break;
+ }
+ case SWAP_DRIVER_NEXT_BUFFER_TO_READ:
+ {
+ /* Use this carefully */
+ result = driver_to_buffer_next_buffer_to_read();
+ if (result == E_SD_NO_DATA_TO_READ) {
+ /* TODO Do what we usually do when there are no
+ * subbuffers to read (make daemon sleep ?) */
}
- default:
- print_warn("Unknown command %d\n", cmd);
+ break;
+ }
+ case SWAP_DRIVER_FLUSH_BUFFER:
+ {
+ result = driver_to_buffer_flush();
+ break;
+ }
+ case SWAP_DRIVER_MSG:
+ {
+ if (msg_handler) {
+ result = msg_handler((void __user *)arg);
+ } else {
+ print_warn("msg_handler() is not register\n");
result = -EINVAL;
- break;
+ }
+ break;
+ }
+ case SWAP_DRIVER_WAKE_UP:
+ {
+ swap_device_wake_up_process();
+ result = E_SD_SUCCESS;
+ break;
+ }
+ default:
+ print_warn("Unknown command %d\n", cmd);
+ result = -EINVAL;
+ break;
}
return result;
}
static void swap_device_pipe_buf_release(struct pipe_inode_info *inode,
- struct pipe_buffer *pipe)
+ struct pipe_buffer *pipe)
{
__free_page(pipe->page);
}
static void swap_device_page_release(struct splice_pipe_desc *spd,
- unsigned int i)
+ unsigned int i)
{
__free_page(spd->pages[i]);
}
};
static ssize_t swap_device_splice_read(struct file *filp, loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len, unsigned int flags)
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
{
/* Wait queue item that consists current task. It is used to be added in
* swap_device_wait queue if there is no data to be read. */
};
/* Get next buffer to read */
- //TODO : Think about spin_locks to prevent reading race condition.
- while((result = driver_to_buffer_next_buffer_to_read()) != E_SD_SUCCESS) {
-
- /* Add process to the swap_device_wait queue and set the current task
- * state TASK_INTERRUPTIBLE. If there is any data to be read, then the
- * current task is removed from the swap_device_wait queue and its state
- * is changed. */
+ /* TODO: Think about spin_locks to prevent reading race condition. */
+ while ((result =
+ driver_to_buffer_next_buffer_to_read()) != E_SD_SUCCESS) {
+
+ /* Add process to the swap_device_wait queue and set the current
+ * task state TASK_INTERRUPTIBLE. If there is any data to be
+ * read, then the current task is removed from the
+ * swap_device_wait queue and its state is changed. */
prepare_to_wait(&swap_device_wait, &wait, TASK_INTERRUPTIBLE);
if (result < 0) {
- print_err("driver_to_buffer_next_buffer_to_read error %d\n", result);
- //TODO Error return to OS
+ print_err("driver_to_buffer_next_buffer_to_read error "
+ "%d\n", result);
+ /* TODO Error return to OS */
result = 0;
goto swap_device_splice_read_error;
} else if (result == E_SD_NO_DATA_TO_READ) {
/** Prints debug message.*/
#define print_debug(msg, args...) \
- printk(KERN_DEBUG "SWAP_DRIVER DEBUG : " msg, ##args)
+ printk(KERN_DEBUG "SWAP_DRIVER DEBUG : " msg, ##args)
/** Prints info message.*/
#define print_msg(msg, args...) \
- printk(KERN_INFO "SWAP_DRIVER : " msg, ##args)
+ printk(KERN_INFO "SWAP_DRIVER : " msg, ##args)
/** Prints warning message.*/
#define print_warn(msg, args...) \
- printk(KERN_WARNING "SWAP_DRIVER WARNING : " msg, ##args)
+ printk(KERN_WARNING "SWAP_DRIVER WARNING : " msg, ##args)
/** Prints error message.*/
#define print_err(msg, args...) \
- printk(KERN_ERR "SWAP_DRIVER ERROR : " msg, ##args)
+ printk(KERN_ERR "SWAP_DRIVER ERROR : " msg, ##args)
/** Prints critical error message.*/
#define print_crit(msg, args...) \
- printk(KERN_CRIT "SWAP_DRIVER CRITICAL : " msg, ##args)
+ printk(KERN_CRIT "SWAP_DRIVER CRITICAL : " msg, ##args)
#endif /* __SWAP_DRIVER_DEVICE_DEFS_H__ */
#include "app_manage.h"
/* Current busy buffer */
-static struct swap_subbuffer *busy_buffer = NULL;
+static struct swap_subbuffer *busy_buffer;
/* Buffers count ready to be read */
-static int buffers_to_read = 0;
+static int buffers_to_read;
/* Pages count in one subbuffer */
-static int pages_per_buffer = 0;
+static int pages_per_buffer;
/* Used to sync changes of the buffers_to_read var */
static spinlock_t buf_to_read;
size_t bytes_to_read = 0;
int page_counter = 0;
- /* Reading from swap_device means reading only current busy_buffer. So, if
- * there is no busy_buffer, we don't get next to read, we just read nothing.
- * In this case, or if there is nothing to read from busy_buffer - return
- * -E_SD_NO_DATA_TO_READ. It should be correctly handled in device_driver */
+ /* Reading from swap_device means reading only current busy_buffer.
+ * So, if there is no busy_buffer, we don't get next to read, we just
+ * read nothing. In this case, or if there is nothing to read from
+ * busy_buffer - return -E_SD_NO_DATA_TO_READ. It should be correctly
+ * handled in device_driver */
if (!busy_buffer || !busy_buffer->full_buffer_part)
return -E_SD_NO_DATA_TO_READ;
busy_buffer->full_buffer_part : count;
/* Copy data from each page to buffer */
- while(bytes_to_copy > 0) {
+ while (bytes_to_copy > 0) {
/* Get size that should be copied from current page */
- size_t read_from_this_page = (bytes_to_copy > PAGE_SIZE) ? PAGE_SIZE
- : bytes_to_copy;
+ size_t read_from_this_page =
+ (bytes_to_copy > PAGE_SIZE) ? PAGE_SIZE
+ : bytes_to_copy;
/* Copy and add size to copied bytes count */
- // TODO Check with more than one page
+ /* TODO Check with more than one page */
bytes_to_read += read_from_this_page -
- copy_to_user(buf, page_address(busy_buffer->data_buffer) +
- (sizeof(struct page*) *
- page_counter),
- read_from_this_page);
+ copy_to_user(
+ buf, page_address(busy_buffer->data_buffer) +
+ (sizeof(struct page *) *
+ page_counter),
+ read_from_this_page);
bytes_to_copy -= read_from_this_page;
page_counter++;
}
struct partial_page *partial = spd->partial;
while (data_to_splice) {
- size_t read_from_current_page = min(data_to_splice, (size_t)PAGE_SIZE);
+ size_t read_from_current_page = min(data_to_splice,
+ (size_t)PAGE_SIZE);
pages[spd->nr_pages] = alloc_page(GFP_KERNEL);
if (!pages[spd->nr_pages]) {
/* TODO: add check for pipe->buffers exceeding */
/* if (spd->nr_pages == pipe->buffers) { */
- /* break; */
+ /* break; */
/* } */
}
return 0;
.enough_mem_cb = app_manage_cont_apps,
};
- if (size == 0 && count == 0) {
+ if (size == 0 && count == 0)
return -E_SD_WRONG_ARGS;
- }
result = swap_buffer_init(&buf_init);
if (result == -E_SB_NO_MEM_QUEUE_BUSY
return -E_SD_NO_MEMORY;
}
- // TODO Race condition: buffer can be used in other thread till we're in
- // this func
+ /* TODO Race condition: buffer can be used in another thread
+  * until we're in this func */
/* Initialize driver_to_buffer variables */
pages_per_buffer = result;
busy_buffer = NULL;
/* Release occupied buffer */
if (busy_buffer) {
result = driver_to_buffer_release();
- // TODO Maybe release anyway
- if (result < 0) {
+ /* TODO Maybe release anyway */
+ if (result < 0)
return result;
- }
busy_buffer = NULL;
}
/* If there is no buffers to read, return E_SD_NO_DATA_TO_READ.
* SHOULD BE POSITIVE, cause there is no real error. */
- if (!something_to_read()) {
+ if (!something_to_read())
return E_SD_NO_DATA_TO_READ;
- }
/* Get next buffer to read */
result = driver_to_buffer_get();
static const char cn_swap_name[] = "cn_swap";
/* Send messages counter */
-static u32 msg_counter = 0;
+static u32 msg_counter;
/**
* @brief Sends message to userspace via netlink.
{
int res;
- res = cn_add_callback(&cn_swap_id, cn_swap_name, us_interaction_recv_msg);
+ res = cn_add_callback(&cn_swap_id,
+ cn_swap_name,
+ us_interaction_recv_msg);
if (res)
return -E_SD_NL_INIT_ERR;
#define CN_SWAP_IDX 0x22 /**< Should be unique throughout the system */
#define CN_SWAP_VAL 0x1 /**< Just the same in kernel and user */
-#define CN_DAEMON_GROUP 0x1 /**< Listener group. Connector works a bit faster
- * when using one */
+#define CN_DAEMON_GROUP 0x1 /**< Listener group. Connector works a bit
+ * faster when using one */
/**
* @enum us_interaction_k2u_msg_t
* === INIT/EXIT ===
* ============================================================================
*/
-static struct dentry *energy_dir = NULL;
+static struct dentry *energy_dir;
/**
* @brief Destroy debugfs for LCD
#define SWAP_DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
- int ret; \
+ int ret; \
\
ret = swap_init_simple_open(inode, file); \
if (ret) \
struct tm_stat *tm = &ct->tm[cpu];
if (unlikely(tm_stat_timestamp(tm))) /* should never happen */
- printk("XXX %s[%d/%d]: WARNING tmstamp(%p) set on cpu(%d)\n",
+ printk(KERN_INFO "XXX %s[%d/%d]: WARNING tmstamp(%p) set on cpu(%d)\n",
current->comm, current->tgid, current->pid, tm, cpu);
tm_stat_set_timestamp(&ct->tm[cpu], time);
}
struct tm_stat *tm = &ct->tm[cpu];
if (unlikely(tm_stat_timestamp(tm) == 0)) {
- /* not initialized. should happen only once per cpu/task */
- printk("XXX %s[%d/%d]: nnitializing tmstamp(%p) on cpu(%d)\n",
+ /* not initialized. should happen only once per cpu/task */
+ printk(KERN_INFO "XXX %s[%d/%d]: nnitializing tmstamp(%p) "
+ "on cpu(%d)\n",
current->comm, current->tgid, current->pid, tm, cpu);
tm_stat_set_timestamp(tm, start_time);
}
* = __switch_to =
* ============================================================================
*/
-static int entry_handler_switch(struct kretprobe_instance *ri, struct pt_regs *regs)
+static int entry_handler_switch(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
{
int cpu;
struct cpus_time *ct;
cpu = smp_processor_id();
- ct = current->tgid ? &ed_system.ct: &ct_idle;
+ ct = current->tgid ? &ed_system.ct : &ct_idle;
cpus_time_lock(ct, flags);
cpus_time_update_running(ct, cpu, get_ntime(), start_time);
cpus_time_unlock(ct, flags);
return 0;
}
-static int ret_handler_switch(struct kretprobe_instance *ri, struct pt_regs *regs)
+static int ret_handler_switch(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
{
int cpu;
struct cpus_time *ct;
cpu = smp_processor_id();
- ct = current->tgid ? &ed_system.ct: &ct_idle;
+ ct = current->tgid ? &ed_system.ct : &ct_idle;
cpus_time_lock(ct, flags);
cpus_time_save_entry(ct, cpu, get_ntime());
cpus_time_unlock(ct, flags);
int fd;
};
-static int entry_handler_sys_read(struct kretprobe_instance *ri, struct pt_regs *regs)
+static int entry_handler_sys_read(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
{
struct sys_read_data *srd = (struct sys_read_data *)ri->data;
* = sys_write =
* ============================================================================
*/
-static int entry_handler_sys_write(struct kretprobe_instance *ri, struct pt_regs *regs)
+static int entry_handler_sys_write(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
{
struct sys_read_data *srd = (struct sys_read_data *)ri->data;
return 0;
}
-static int ret_handler_sys_write(struct kretprobe_instance *ri, struct pt_regs *regs)
+static int ret_handler_sys_write(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
{
int ret = regs_return_value(regs);
ret = swap_register_kretprobe(&sys_read_krp);
if (ret) {
- printk("swap_register_kretprobe(sys_read) result=%d!\n", ret);
+ printk(KERN_INFO "swap_register_kretprobe(sys_read) "
+ "result=%d!\n", ret);
return ret;
}
ret = swap_register_kretprobe(&sys_write_krp);
if (ret != 0) {
- printk("swap_register_kretprobe(sys_write) result=%d!\n", ret);
+ printk(KERN_INFO "swap_register_kretprobe(sys_write) "
+ "result=%d!\n", ret);
goto unregister_sys_read;
}
ret = swap_register_kretprobe(&switch_to_krp);
if (ret) {
- printk("swap_register_kretprobe(__switch_to) result=%d!\n",
+ printk(KERN_INFO "swap_register_kretprobe(__switch_to) "
+ "result=%d!\n",
ret);
goto unregister_sys_write;
}
}
static DEFINE_MUTEX(mutex_enable);
-static int energy_enable = 0;
+static int energy_enable;
/**
* @brief Start measuring the energy consumption
mutex_lock(&mutex_enable);
if (energy_enable) {
- printk("energy profiling is already run!\n");
+ printk(KERN_INFO "energy profiling is already run!\n");
goto unlock;
}
mutex_lock(&mutex_enable);
if (energy_enable == 0) {
- printk("energy profiling is not running!\n");
+ printk(KERN_INFO "energy profiling is not running!\n");
ret = -EINVAL;
goto unlock;
}
return 0;
not_found:
- printk("ERROR: symbol '%s' not found\n", sym);
+ printk(KERN_INFO "ERROR: symbol '%s' not found\n", sym);
return -ESRCH;
}
ret = init_feature();
if (ret) {
- printk("Cannot init feature\n");
+ printk(KERN_INFO "Cannot init feature\n");
return ret;
}
ret = lcd_init();
if (ret)
- printk("Cannot init LCD, ret=%d\n", ret);
+ printk(KERN_INFO "Cannot init LCD, ret=%d\n", ret);
return 0;
}
f = filp_open(path, O_RDONLY, 0);
if (IS_ERR(f)) {
- printk("cannot open file \'%s\'", path);
+ printk(KERN_INFO "cannot open file \'%s\'", path);
return PTR_ERR(f);
}
struct lcd_priv_data *lcd;
if (tms_brt_cnt <= 0) {
- printk("error variable tms_brt_cnt=%d\n", tms_brt_cnt);
+ printk(KERN_INFO "error variable tms_brt_cnt=%d\n",
+ tms_brt_cnt);
return NULL;
}
lcd = kmalloc(sizeof(*lcd) + sizeof(*lcd->tms_brt) * tms_brt_cnt,
GFP_KERNEL);
if (lcd == NULL) {
- printk("error: %s - out of memory\n", __func__);
+ printk(KERN_INFO "error: %s - out of memory\n", __func__);
return NULL;
}
static int get_brt_num_of_array(struct lcd_priv_data *lcd, int brt)
{
if (brt > lcd->max_brt || brt < lcd->min_brt) {
- printk("LCD energy error: set brightness=%d, "
+ printk(KERN_INFO "LCD energy error: set brightness=%d, "
"when brightness[%d..%d]\n",
brt, lcd->min_brt, lcd->max_brt);
brt = brt > lcd->max_brt ? lcd->max_brt : lcd->min_brt;
set_power_off(lcd);
break;
default:
- printk("LCD energy error: set power=%d\n", val);
+ printk(KERN_INFO "LCD energy error: set power=%d\n", val);
break;
}
set_power(ops, (int)data);
break;
default:
- printk("LCD energy error: action=%d\n", action);
+ printk(KERN_INFO "LCD energy error: action=%d\n", action);
return -EINVAL;
}
for (i = 0; i < lcd_ops_cnt; ++i) {
ops = lcd_ops[i]();
if (ops == NULL) {
- printk("error %s [ops == NULL]\n", __func__);
+ printk(KERN_INFO "error %s [ops == NULL]\n", __func__);
continue;
}
if (0 == ops->check(ops)) {
- printk("error checking %s\n", ops->name);
+ printk(KERN_INFO "error checking %s\n", ops->name);
continue;
}
ret = register_lcd(ops);
if (ret) {
- printk("error register_lcd %s\n", ops->name);
+ printk(KERN_INFO "error register_lcd %s\n", ops->name);
continue;
}
energy_dir = get_energy_dir();
if (energy_dir == NULL) {
- printk("Cannot energy_dir\n");
+ printk(KERN_INFO "Cannot energy_dir\n");
return -ENOENT;
}
ret = do_lcd_init();
if (ret) {
- printk("LCD is not supported\n");
+ printk(KERN_INFO "LCD is not supported\n");
exit_lcd_debugfs();
}
if (stat_lcd_ops[i] & SLO_REGISTER) {
ret = ops->set(ops);
if (ret) {
- printk("error %s set LCD energy", ops->name);
+ printk(KERN_INFO "error %s set LCD energy",
+ ops->name);
continue;
}
if (stat_lcd_ops[i] & SLO_SET) {
ret = ops->unset(ops);
if (ret)
- printk("error %s unset LCD energy", ops->name);
+ printk(KERN_INFO "error %s unset LCD energy",
+ ops->name);
clean_brightness(ops);
stat_lcd_ops[i] &= ~SLO_SET;
SWAP_DEFINE_SIMPLE_ATTRIBUTE(fops_get_system, get_system, NULL, "%llu\n");
-static struct dentry *lcd_dir = NULL;
+static struct dentry *lcd_dir;
/**
* @brief Register LCD in debugfs
-static const char path_backlight[] = "/sys/class/backlight/emulator/brightness";
-static const char path_backlight_min[] = "/sys/class/backlight/emulator/min_brightness";
-static const char path_backlight_max[] = "/sys/class/backlight/emulator/max_brightness";
-static const char path_power[] = "/sys/class/lcd/emulator/lcd_power";
-
-static const char *all_path[] = {
+static const char path_backlight[] =
+ "/sys/class/backlight/emulator/brightness";
+static const char path_backlight_min[] =
+ "/sys/class/backlight/emulator/min_brightness";
+static const char path_backlight_max[] =
+ "/sys/class/backlight/emulator/max_brightness";
+static const char path_power[] =
+ "/sys/class/lcd/emulator/lcd_power";
+
+static const char * const all_path[] = {
path_backlight,
path_backlight_min,
path_backlight_max,
#include "lcd_base.h"
-static const char path_backlight[] = "/sys/class/backlight/s6e8aa0-bl/brightness";
-static const char path_backlight_min[] = "/sys/class/backlight/s6e8aa0-bl/min_brightness";
-static const char path_backlight_max[] = "/sys/class/backlight/s6e8aa0-bl/max_brightness";
-static const char path_power[] = "/sys/class/lcd/s6e8aa0/lcd_power";
-
-static const char *all_path[] = {
+static const char path_backlight[] =
+ "/sys/class/backlight/s6e8aa0-bl/brightness";
+static const char path_backlight_min[] =
+ "/sys/class/backlight/s6e8aa0-bl/min_brightness";
+static const char path_backlight_max[] =
+ "/sys/class/backlight/s6e8aa0-bl/max_brightness";
+static const char path_power[] =
+ "/sys/class/lcd/s6e8aa0/lcd_power";
+
+static const char * const all_path[] = {
path_backlight,
path_backlight_min,
path_backlight_max,
#include "lcd_base.h"
-static const char path_backlight[] = "/sys/class/backlight/s6e8aa0-bl/brightness";
-static const char path_backlight_max[] = "/sys/class/backlight/s6e8aa0-bl/max_brightness";
-static const char path_power[] = "/sys/class/lcd/s6e8aa0/lcd_power";
-
-static const char *all_path[] = {
+static const char path_backlight[] =
+ "/sys/class/backlight/s6e8aa0-bl/brightness";
+static const char path_backlight_max[] =
+ "/sys/class/backlight/s6e8aa0-bl/max_brightness";
+static const char path_power[] =
+ "/sys/class/lcd/s6e8aa0/lcd_power";
+
+static const char * const all_path[] = {
path_backlight,
path_backlight_max,
path_power
static inline u64 tm_stat_current_running(struct tm_stat *tm, u64 now)
{
if (unlikely(now < tm->timestamp))
- printk("XXX %p WARNING now(%llu) < tmstmp(%llu)\n",
+ printk(KERN_INFO "XXX %p WARNING now(%llu) < tmstmp(%llu)\n",
tm, now, tm->timestamp);
- return tm->timestamp ? tm->running + now - tm->timestamp: tm->running;
+ return tm->timestamp ? tm->running + now - tm->timestamp : tm->running;
}
#endif /* _TM_STAT_H */
#include <ksyms/ksyms.h>
-static struct mm_struct *swap_init_mm = NULL;
-static int (*swap_set_memory_ro)(unsigned long addr, int numpages) = NULL;
-static int (*swap_set_memory_rw)(unsigned long addr, int numpages) = NULL;
+static struct mm_struct *swap_init_mm;
+static int (*swap_set_memory_ro)(unsigned long addr, int numpages);
+static int (*swap_set_memory_rw)(unsigned long addr, int numpages);
static int get_pte_cb(pte_t *ptep, pgtable_t token,
*maddr = val;
swap_set_memory_ro(page_addr, 1);
} else {
- printk("RWX: failed to write memory %08lx (%08lx)\n",
+ printk(KERN_INFO "RWX: failed to write memory %08lx (%08lx)\n",
addr, val);
}
spin_unlock_irqrestore(&mem_lock, flags);
return 0;
not_found:
- printk("ERROR: symbol '%s' not found\n", sym);
+ printk(KERN_INFO "ERROR: symbol '%s' not found\n", sym);
return -ESRCH;
}
if (uregs & 0x10) {
int reg_mask = 0x1;
- //search in reg list
+ /* search in reg list */
for (i = 0; i < 13; i++, reg_mask <<= 1) {
if (!(insn & reg_mask))
break;
}
if (i == 13) {
- DBPRINTF ("there are no free register %x in insn %lx!", uregs, insn);
+ DBPRINTF("there are no free register %x in insn %lx!",
+ uregs, insn);
return -EINVAL;
}
- DBPRINTF ("prep_pc_dep_insn_execbuf: using R%d, changing regs %x", i, uregs);
+ DBPRINTF("prep_pc_dep_insn_execbuf: using R%d, changing regs %x",
+ i, uregs);
- // set register to save
+ /* set register to save */
ARM_INSN_REG_SET_RD(insns[0], i);
- // set register to load address to
+ /* set register to load address to */
ARM_INSN_REG_SET_RD(insns[1], i);
- // set instruction to execute and patch it
+ /* set instruction to execute and patch it */
if (uregs & 0x10) {
ARM_INSN_REG_CLEAR_MR(insn, 15);
ARM_INSN_REG_SET_MR(insn, i);
}
insns[UPROBES_TRAMP_INSN_IDX] = insn;
- // set register to restore
+ /* set register to restore */
ARM_INSN_REG_SET_RD(insns[3], i);
return 0;
int ret, uregs, pc_dep;
if (addr & 0x03) {
- printk("Error in %s at %d: attempt to register uprobe "
+ printk(KERN_INFO "Error in %s at %d: attempt to register uprobe "
"at an unaligned address\n", __FILE__, __LINE__);
return -EINVAL;
}
/* register list */
} else if (ARM_INSN_MATCH(SM, insn)) {
uregs = 0x10;
- if (ARM_INSN_REG_MR(insn, 15)) {
+ if (ARM_INSN_REG_MR(insn, 15))
pc_dep = 1;
- }
}
/* check instructions that can write result to SP and uses PC */
if (pc_dep && (ARM_INSN_REG_RD(insn) == 13)) {
- printk("Error in %s at %d: instruction check failed (arm)\n",
+ printk(KERN_INFO "Error in %s at %d: instruction check failed (arm)\n",
__FILE__, __LINE__);
return -EFAULT;
}
if (unlikely(uregs && pc_dep)) {
memcpy(tramp, pc_dep_insn_execbuf, KPROBES_TRAMP_LEN);
if (prep_pc_dep_insn_execbuf(tramp, insn, uregs) != 0) {
- printk("Error in %s at %d: failed "
+ printk(KERN_INFO "Error in %s at %d: failed "
"to prepare exec buffer for insn %lx!",
__FILE__, __LINE__, insn);
return -EINVAL;
* @param kcb Pointer to kprobe_ctlblk.
* @return Void.
*/
-void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+void set_current_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
__get_cpu_var(swap_current_kprobe) = p;
- DBPRINTF ("set_current_kprobe: p=%p addr=%p\n", p, p->addr);
+ DBPRINTF("set_current_kprobe: p=%p addr=%p\n", p, p->addr);
}
static int kprobe_handler(struct pt_regs *regs)
return 0;
no_kprobe:
- printk("no_kprobe: Not one of ours: let kernel handle it %p\n",
+ printk(KERN_INFO "no_kprobe: Not one of ours: let kernel handle it %p\n",
(unsigned long *)regs->ARM_pc);
return 1;
}
int swap_setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
- kprobe_pre_entry_handler_t pre_entry = (kprobe_pre_entry_handler_t)jp->pre_entry;
+ kprobe_pre_entry_handler_t pre_entry =
+ (kprobe_pre_entry_handler_t)jp->pre_entry;
entry_point_t entry = (entry_point_t)jp->entry;
pre_entry = (kprobe_pre_entry_handler_t)jp->pre_entry;
void __naked swap_kretprobe_trampoline(void)
{
__asm__ __volatile__ (
- "stmdb sp!, {r0 - r11} \n\t"
- "mov r1, sp \n\t"
- "mov r0, #0 \n\t"
- "bl trampoline_probe_handler\n\t"
- "mov lr, r0 \n\t"
- "ldmia sp!, {r0 - r11} \n\t"
- "bx lr \n\t"
+ "stmdb sp!, {r0 - r11}\n"
+ "mov r1, sp\n"
+ "mov r0, #0\n"
+ "bl trampoline_probe_handler\n"
+ "mov lr, r0\n"
+ "ldmia sp!, {r0 - r11}\n"
+ "bx lr\n"
: : : "memory");
}
*/
void kjump_trampoline(void);
__asm(
- "kjump_trampoline: \n"
+ "kjump_trampoline:\n"
- "mov r0, r10 \n"
- "bl kjump_handler \n"
- "nop \n" /* for kjump_kprobe */
+ "mov r0, r10\n"
+ "bl kjump_handler\n"
+ "nop\n" /* for kjump_kprobe */
);
/**
ret = swap_register_kprobe(&kjump_kprobe);
if (ret)
- printk("ERROR: kjump_init(), ret=%d\n", ret);
+ printk(KERN_INFO "ERROR: kjump_init(), ret=%d\n", ret);
return ret;
}
*/
void jump_trampoline(void);
__asm(
- "jump_trampoline: \n"
-
- "push {r0 - r12} \n"
- "mov r1, r0 \n" /* data --> r1 */
- "bl get_r0 \n"
- "str r0, [sp] \n" /* restore r0 */
- "mov r0, r1 \n" /* data --> r0 */
- "bl jump_handler \n"
- "mov lr, r0 \n"
- "pop {r0 - r12} \n"
- "bx lr \n"
+ "jump_trampoline:\n"
+
+ "push {r0 - r12}\n"
+ "mov r1, r0\n" /* data --> r1 */
+ "bl get_r0\n"
+ "str r0, [sp]\n" /* restore r0 */
+ "mov r0, r1\n" /* data --> r0 */
+ "bl jump_handler\n"
+ "mov lr, r0\n"
+ "pop {r0 - r12}\n"
+ "bx lr\n"
);
/**
}
EXPORT_SYMBOL_GPL(swap_unregister_undef_hook);
-// kernel probes hook
+/* kernel probes hook */
static struct undef_hook undef_ho_k = {
.instr_mask = 0xffffffff,
.instr_val = BREAKPOINT_INSTRUCTION,
return 0;
not_found:
- printk("ERROR: symbol '%s' not found\n", sym);
+ printk(KERN_INFO "ERROR: symbol '%s' not found\n", sym);
return -ESRCH;
}
/**
* @file kprobe/arch/asm-arm/swap_kprobes.h
- * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: initial implementation for ARM/MIPS
- * @author Alexey Gerenkov <a.gerenkov@samsung.com> User-Space Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
- * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
- * @author Alexander Shirshikov <a.shirshikov@samsung.com>: initial implementation for Thumb
+ * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>:
+ * initial implementation for ARM/MIPS
+ * @author Alexey Gerenkov <a.gerenkov@samsung.com>:
+ * User-Space Probes initial implementation;
+ * Support x86/ARM/MIPS for both user and kernel spaces.
+ * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>:
+ * redesign module for separating core and arch parts
+ * @author Alexander Shirshikov <a.shirshikov@samsung.com>:
+ * initial implementation for Thumb
*
* @section LICENSE
*
#define MAX_INSN_SIZE 1
/** Uprobes trampoline length */
-#define UPROBES_TRAMP_LEN 9 * 4
+#define UPROBES_TRAMP_LEN (9 * 4)
/** Uprobes trampoline insn idx */
#define UPROBES_TRAMP_INSN_IDX 2
/** Uprobes trampoline ss break idx */
/** Uprobes trampoline ret break idx */
#define UPROBES_TRAMP_RET_BREAK_IDX 5
/** Kprobes trampoline length */
-#define KPROBES_TRAMP_LEN 9 * 4
+#define KPROBES_TRAMP_LEN (9 * 4)
/** Kprobes trampoline insn idx */
-# define KPROBES_TRAMP_INSN_IDX UPROBES_TRAMP_INSN_IDX
+#define KPROBES_TRAMP_INSN_IDX UPROBES_TRAMP_INSN_IDX
/** Kprobes trampoline ss break idx */
-# define KPROBES_TRAMP_SS_BREAK_IDX UPROBES_TRAMP_SS_BREAK_IDX
+#define KPROBES_TRAMP_SS_BREAK_IDX UPROBES_TRAMP_SS_BREAK_IDX
/* TODO: remove (not needed for kprobe) */
-# define KPROBES_TRAMP_RET_BREAK_IDX UPROBES_TRAMP_RET_BREAK_IDX
+#define KPROBES_TRAMP_RET_BREAK_IDX UPROBES_TRAMP_RET_BREAK_IDX
/** User register offset */
#define UREGS_OFFSET 8
/**
* @brief Gets task pc.
*
- * @param p Pointer to task_struct
+ * @param p Pointer to task_struct
* @return Value in pc.
*/
static inline unsigned long arch_get_task_pc(struct task_struct *p)
regs->uregs[num] = val;
}
-// undefined
-# define MASK_ARM_INSN_UNDEF 0x0FF00000 // xxxx1111 1111xxxx xxxxxxxx xxxxxxxx
-# define PTRN_ARM_INSN_UNDEF 0x03000000 // cccc0011 0000xxxx xxxxxxxx xxxxxxxx
+/* undefined */
+#define MASK_ARM_INSN_UNDEF 0x0FF00000
+#define PTRN_ARM_INSN_UNDEF 0x03000000
-# define MASK_THUMB_INSN_UNDEF 0xFE00 // 11111111xxxxxxxx
-# define PTRN_THUMB_INSN_UNDEF 0xDE00 // 11011110xxxxxxxx
+#define MASK_THUMB_INSN_UNDEF 0xFE00
+#define PTRN_THUMB_INSN_UNDEF 0xDE00
-// architecturally undefined
-# define MASK_ARM_INSN_AUNDEF 0x0FF000F0
-# define PTRN_ARM_INSN_AUNDEF 0x07F000F0
+/* architecturally undefined */
+#define MASK_ARM_INSN_AUNDEF 0x0FF000F0
+#define PTRN_ARM_INSN_AUNDEF 0x07F000F0
-// branches
-# define MASK_ARM_INSN_B 0x0F000000 // xxxx1111xxxxxxxxxxxxxxxxxxxxxxxx
-# define PTRN_ARM_INSN_B 0x0A000000 // cccc1010xxxxxxxxxxxxxxxxxxxxxxxx
+/* branches */
+#define MASK_ARM_INSN_B 0x0F000000
+#define PTRN_ARM_INSN_B 0x0A000000
-# define MASK_THUMB_INSN_B1 0xF000 // 1111xxxxxxxxxxxx
-# define PTRN_THUMB_INSN_B1 0xD000 // 1101xxxxxxxxxxxx // b<cond> label
+#define MASK_THUMB_INSN_B1 0xF000
+#define PTRN_THUMB_INSN_B1 0xD000 /* b<cond> label */
-# define MASK_THUMB_INSN_B2 0xF800 // 11111xxxxxxxxxxx
-# define PTRN_THUMB_INSN_B2 0xE000 // 11100xxxxxxxxxxx // b label
+#define MASK_THUMB_INSN_B2 0xF800
+#define PTRN_THUMB_INSN_B2 0xE000 /* b label */
-# define MASK_THUMB_INSN_CBZ 0xF500 // 1111x1x1xxxxxxxx
-# define PTRN_THUMB_INSN_CBZ 0xB100 // 1011x0x1xxxxxxxx // CBZ/CBNZ
+#define MASK_THUMB_INSN_CBZ 0xF500
+#define PTRN_THUMB_INSN_CBZ 0xB100 /* CBZ/CBNZ */
-# define MASK_THUMB2_INSN_B1 0xD000F800 // 11x1xxxxxxxxxxxx 11111xxxxxxxxxxx // swapped
-# define PTRN_THUMB2_INSN_B1 0x8000F000 // 10x0xxxxxxxxxxxx 11110xxxxxxxxxxx // swapped
+#define MASK_THUMB2_INSN_B1 0xD000F800
+#define PTRN_THUMB2_INSN_B1 0x8000F000
-# define MASK_THUMB2_INSN_B2 0xD000F800 // 11x1xxxxxxxxxxxx 11111xxxxxxxxxxx // swapped
-# define PTRN_THUMB2_INSN_B2 0x9000F000 // 10x1xxxxxxxxxxxx 11110xxxxxxxxxxx // swapped
+#define MASK_THUMB2_INSN_B2 0xD000F800
+#define PTRN_THUMB2_INSN_B2 0x9000F000
-# define MASK_ARM_INSN_BL 0x0F000000 // xxxx1111xxxxxxxxxxxxxxxxxxxxxxxx
-# define PTRN_ARM_INSN_BL 0x0B000000 // cccc1011xxxxxxxxxxxxxxxxxxxxxxxx
+#define MASK_ARM_INSN_BL 0x0F000000
+#define PTRN_ARM_INSN_BL 0x0B000000
-//# define MASK_THUMB_INSN_BL 0xF800 // 11111xxxxxxxxxxx
-//# define PTRN_THUMB_INSN_BL 0xF000 // 11110xxxxxxxxxxx // shared between BL and BLX
-//# define PTRN_THUMB_INSN_BL 0xF800 // 11111xxxxxxxxxxx
+/* #define MASK_THUMB_INSN_BL 0xF800 */
+/* #define PTRN_THUMB_INSN_BL 0xF000 shared between BL and BLX */
+/* #define PTRN_THUMB_INSN_BL 0xF800 */
-# define MASK_THUMB2_INSN_BL 0xD000F800 // 11x1xxxxxxxxxxxx 11111xxxxxxxxxxx // swapped
-# define PTRN_THUMB2_INSN_BL 0xD000F000 // 11x1xxxxxxxxxxxx 11110xxxxxxxxxxx // bl imm swapped
+#define MASK_THUMB2_INSN_BL 0xD000F800
+#define PTRN_THUMB2_INSN_BL 0xD000F000 /* bl imm swapped */
-# define MASK_ARM_INSN_BLX1 0xFE000000 // 1111111axxxxxxxxxxxxxxxxxxxxxxxx
-# define PTRN_ARM_INSN_BLX1 0xFA000000 // 1111101axxxxxxxxxxxxxxxxxxxxxxxx
+#define MASK_ARM_INSN_BLX1 0xFE000000
+#define PTRN_ARM_INSN_BLX1 0xFA000000
-//# define MASK_THUMB_INSN_BLX1 0xF800 // 11111xxxxxxxxxxx / blx imm
-//# define PTRN_THUMB_INSN_BLX1 0xF000 // 11101xxxxxxxxxxx
+/* #define MASK_THUMB_INSN_BLX1 0xF800 */
+/* #define PTRN_THUMB_INSN_BLX1 0xF000 */
-# define MASK_THUMB2_INSN_BLX1 0xD001F800 // 11x1xxxxxxxxxxx1 11111xxxxxxxxxxx // swapped
-# define PTRN_THUMB2_INSN_BLX1 0xC000F000 // 11x0xxxxxxxxxxx0 11110xxxxxxxxxxx // swapped
+#define MASK_THUMB2_INSN_BLX1 0xD001F800
+#define PTRN_THUMB2_INSN_BLX1 0xC000F000
-# define MASK_ARM_INSN_BLX2 0x0FF000F0 // xxxx11111111xxxxxxxxxxxx1111xxxx
-# define PTRN_ARM_INSN_BLX2 0x01200030 // cccc00010010xxxxxxxxxxxx0011xxxx
+#define MASK_ARM_INSN_BLX2 0x0FF000F0
+#define PTRN_ARM_INSN_BLX2 0x01200030
-# define MASK_THUMB_INSN_BLX2 0xFF80 // 111111111xxxxxxx / blx reg
-# define PTRN_THUMB_INSN_BLX2 0x4780 // 010001111xxxxxxx
+#define MASK_THUMB_INSN_BLX2 0xFF80 /* blx reg */
+#define PTRN_THUMB_INSN_BLX2 0x4780
-# define MASK_ARM_INSN_BX 0x0FF000F0 // cccc11111111xxxxxxxxxxxx1111xxxx
-# define PTRN_ARM_INSN_BX 0x01200010 // cccc00010010xxxxxxxxxxxx0001xxxx
+#define MASK_ARM_INSN_BX 0x0FF000F0
+#define PTRN_ARM_INSN_BX 0x01200010
-# define MASK_THUMB_INSN_BX 0xFF80 // 111111111xxxxxxx
-# define PTRN_THUMB_INSN_BX 0x4700 // 010001110xxxxxxx
+#define MASK_THUMB_INSN_BX 0xFF80
+#define PTRN_THUMB_INSN_BX 0x4700
-# define MASK_ARM_INSN_BXJ 0x0FF000F0 // xxxx11111111xxxxxxxxxxxx1111xxxx
-# define PTRN_ARM_INSN_BXJ 0x01200020 // cccc00010010xxxxxxxxxxxx0010xxxx
+#define MASK_ARM_INSN_BXJ 0x0FF000F0
+#define PTRN_ARM_INSN_BXJ 0x01200020
-# define MASK_THUMB2_INSN_BXJ 0xD000FFF0 // 11x1xxxxxxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_BXJ 0x8000F3C0 // 10x0xxxxxxxxxxxx 111100111100xxxx // swapped
+#define MASK_THUMB2_INSN_BXJ 0xD000FFF0
+#define PTRN_THUMB2_INSN_BXJ 0x8000F3C0
-// software interrupts
-# define MASK_ARM_INSN_SWI 0x0F000000 // cccc1111xxxxxxxxxxxxxxxxxxxxxxxx
-# define PTRN_ARM_INSN_SWI 0x0F000000 // cccc1111xxxxxxxxxxxxxxxxxxxxxxxx
+/* software interrupts */
+#define MASK_ARM_INSN_SWI 0x0F000000
+#define PTRN_ARM_INSN_SWI 0x0F000000
-# define MASK_THUMB_INSN_SWI 0xFF00 // 11111111xxxxxxxx
-# define PTRN_THUMB_INSN_SWI 0xDF00 // 11011111xxxxxxxx
+#define MASK_THUMB_INSN_SWI 0xFF00
+#define PTRN_THUMB_INSN_SWI 0xDF00
-// break
-# define MASK_ARM_INSN_BREAK 0xFFF000F0 // 111111111111xxxxxxxxxxxx1111xxxx
-# define PTRN_ARM_INSN_BREAK 0xE1200070 // 111000010010xxxxxxxxxxxx0111xxxx /? A8-56 ARM DDI 046B if cond != ‘1110’ then UNPREDICTABLE;
+/* break */
+#define MASK_ARM_INSN_BREAK 0xFFF000F0
+#define PTRN_ARM_INSN_BREAK 0xE1200070
+/* A8-56 ARM DDI 0406B: if cond != '1110' then UNPREDICTABLE */
-# define MASK_THUMB_INSN_BREAK 0xFF00 // 11111111xxxxxxxx
-# define PTRN_THUMB_INSN_BREAK 0xBE00 // 10111110xxxxxxxx
+#define MASK_THUMB_INSN_BREAK 0xFF00
+#define PTRN_THUMB_INSN_BREAK 0xBE00
-// CLZ
-# define MASK_ARM_INSN_CLZ 0x0FFF0FF0 // xxxx111111111111xxxx11111111xxxx
-# define PTRN_ARM_INSN_CLZ 0x016F0F10 // cccc000101101111xxxx11110001xxxx
+/* CLZ */
+#define MASK_ARM_INSN_CLZ 0x0FFF0FF0
+#define PTRN_ARM_INSN_CLZ 0x016F0F10
-// Data processing immediate shift
-# define MASK_ARM_INSN_DPIS 0x0E000010
-# define PTRN_ARM_INSN_DPIS 0x00000000
-// Data processing register shift
-# define MASK_ARM_INSN_DPRS 0x0E000090
-# define PTRN_ARM_INSN_DPRS 0x00000010
+/* Data processing immediate shift */
+#define MASK_ARM_INSN_DPIS 0x0E000010
+#define PTRN_ARM_INSN_DPIS 0x00000000
+/* Data processing register shift */
+#define MASK_ARM_INSN_DPRS 0x0E000090
+#define PTRN_ARM_INSN_DPRS 0x00000010
-# define MASK_THUMB2_INSN_DPRS 0xFFE00000 // 11111111111xxxxxxxxxxxxxxxxxxxxx
-# define PTRN_THUMB2_INSN_DPRS 0xEA000000 // 1110101xxxxxxxxxxxxxxxxxxxxxxxxx
+#define MASK_THUMB2_INSN_DPRS 0xFFE00000
+#define PTRN_THUMB2_INSN_DPRS 0xEA000000
-// Data processing immediate
-# define MASK_ARM_INSN_DPI 0x0E000000
-# define PTRN_ARM_INSN_DPI 0x02000000
+/* Data processing immediate */
+#define MASK_ARM_INSN_DPI 0x0E000000
+#define PTRN_ARM_INSN_DPI 0x02000000
-# define MASK_THUMB_INSN_DP 0xFC00 // 111111xxxxxxxxxx
-# define PTRN_THUMB_INSN_DP 0x4000 // 010000xxxxxxxxxx
+#define MASK_THUMB_INSN_DP 0xFC00
+#define PTRN_THUMB_INSN_DP 0x4000
-# define MASK_THUMB_INSN_APC 0xF800 // 11111xxxxxxxxxxx
-# define PTRN_THUMB_INSN_APC 0xA000 // 10100xxxxxxxxxxx ADD Rd, [PC, #<imm8> * 4]
+#define MASK_THUMB_INSN_APC 0xF800
+#define PTRN_THUMB_INSN_APC 0xA000 /* ADD Rd, [PC, #<imm8> * 4] */
-# define MASK_THUMB2_INSN_DPI 0xFBE08000 // 11111x11111xxxxx 1xxxxxxxxxxxxxxx
-//# define PTRN_THUMB2_INSN_DPI 0xF0000000 // 11110x0xxxxxxxxx 0xxxxxxxxxxxxxxx /? A6-19 ARM DDI 0406B
-# define PTRN_THUMB2_INSN_DPI 0xF2000000 // 11110x1xxxxxxxxx 0xxxxxxxxxxxxxxx /? A6-19 ARM DDI 0406B
+#define MASK_THUMB2_INSN_DPI 0xFBE08000
+/* #define PTRN_THUMB2_INSN_DPI 0xF0000000 */
+/* A6-19 ARM DDI 0406B */
+#define PTRN_THUMB2_INSN_DPI 0xF2000000
+/* A6-19 ARM DDI 0406B */
-# define MASK_THUMB_INSN_MOV3 0xFF00 // 11111111xxxxxxxx
-# define PTRN_THUMB_INSN_MOV3 0x4600 // 01000110xxxxxxxx MOV Rd, PC
+#define MASK_THUMB_INSN_MOV3 0xFF00
+#define PTRN_THUMB_INSN_MOV3 0x4600 /* MOV Rd, PC */
-# define MASK_THUMB2_INSN_RSBW 0x8000fbe0 // 1xxxxxxxxxxxxxxx 11111x11111xxxxx // swapped
-# define PTRN_THUMB2_INSN_RSBW 0x0000f1c0 // 0xxxxxxxxxxxxxxx 11110x01110xxxxx RSB{S}.W Rd, Rn, #<const> // swapped
+#define MASK_THUMB2_INSN_RSBW 0x8000fbe0
+#define PTRN_THUMB2_INSN_RSBW 0x0000f1c0 /* RSB{S}.W Rd,Rn,#<const> */
-# define MASK_THUMB2_INSN_RORW 0xf0f0ffe0 // 1111xxxx1111xxxx 11111111111xxxxx // swapped
-# define PTRN_THUMB2_INSN_RORW 0xf000fa60 // 1111xxxx0000xxxx 11111010011xxxxx ROR{S}.W Rd, Rn, Rm // swapped
+#define MASK_THUMB2_INSN_RORW 0xf0f0ffe0
+#define PTRN_THUMB2_INSN_RORW 0xf000fa60 /* ROR{S}.W Rd, Rn, Rm */
-# define MASK_THUMB2_INSN_ROR 0x0030ffef // xxxxxxxxxx11xxxx 11111111111x1111 // swapped
-# define PTRN_THUMB2_INSN_ROR 0x0030ea4f // xxxxxxxxxx11xxxx 11101010010x1111 ROR{S} Rd, Rm, #<imm> // swapped
+#define MASK_THUMB2_INSN_ROR 0x0030ffef
+#define PTRN_THUMB2_INSN_ROR 0x0030ea4f /* ROR{S} Rd, Rm, #<imm> */
-# define MASK_THUMB2_INSN_LSLW1 0xf0f0ffe0 // 1111xxxx1111xxxx 11111111111xxxxx // swapped
-# define PTRN_THUMB2_INSN_LSLW1 0xf000fa00 // 1111xxxx0000xxxx 11111010000xxxxx LSL{S}.W Rd, Rn, Rm // swapped
+#define MASK_THUMB2_INSN_LSLW1 0xf0f0ffe0
+#define PTRN_THUMB2_INSN_LSLW1 0xf000fa00 /* LSL{S}.W Rd, Rn, Rm */
-# define MASK_THUMB2_INSN_LSLW2 0x0030ffef // xxxxxxxxxx11xxxx 11111111111x1111 // swapped
-# define PTRN_THUMB2_INSN_LSLW2 0x0000ea4f // xxxxxxxxxx00xxxx 11101010010x1111 LSL{S}.W Rd, Rm, #<imm5> // swapped
+#define MASK_THUMB2_INSN_LSLW2 0x0030ffef
+#define PTRN_THUMB2_INSN_LSLW2 0x0000ea4f /* LSL{S}.W Rd, Rm, #<imm5> */
-# define MASK_THUMB2_INSN_LSRW1 0xf0f0ffe0 // 1111xxxx1111xxxx 11111111111xxxxx // swapped
-# define PTRN_THUMB2_INSN_LSRW1 0xf000fa20 // 1111xxxx0000xxxx 11111010001xxxxx LSR{S}.W Rd, Rn, Rm // swapped
+#define MASK_THUMB2_INSN_LSRW1 0xf0f0ffe0
+#define PTRN_THUMB2_INSN_LSRW1 0xf000fa20 /* LSR{S}.W Rd, Rn, Rm */
-# define MASK_THUMB2_INSN_LSRW2 0x0030ffef // xxxxxxxxxx11xxxx 11111111111x1111 // swapped
-# define PTRN_THUMB2_INSN_LSRW2 0x0010ea4f // xxxxxxxxxx01xxxx 11101010010x1111 LSR{S}.W Rd, Rm, #<imm5> // swapped
+#define MASK_THUMB2_INSN_LSRW2 0x0030ffef
+#define PTRN_THUMB2_INSN_LSRW2 0x0010ea4f /* LSR{S}.W Rd, Rm, #<imm5> */
-# define MASK_THUMB2_INSN_TEQ1 0x8f00fbf0 // 1xxx1111xxxxxxxx 11111x111111xxxx // swapped
-# define PTRN_THUMB2_INSN_TEQ1 0x0f00f090 // 0xxx1111xxxxxxxx 11110x001001xxxx TEQ Rn, #<const> // swapped
+#define MASK_THUMB2_INSN_TEQ1 0x8f00fbf0
+#define PTRN_THUMB2_INSN_TEQ1 0x0f00f090 /* TEQ Rn, #<const> */
-# define MASK_THUMB2_INSN_TEQ2 0x0f00fff0 // xxxx1111xxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_TEQ2 0x0f00ea90 // xxxx1111xxxxxxxx 111010101001xxxx TEQ Rn, Rm{,<shift>} // swapped
+#define MASK_THUMB2_INSN_TEQ2 0x0f00fff0
+#define PTRN_THUMB2_INSN_TEQ2 0x0f00ea90 /* TEQ Rn, Rm{,<shift>} */
-# define MASK_THUMB2_INSN_TST1 0x8f00fbf0 // 1xxx1111xxxxxxxx 11111x111111xxxx // swapped
-# define PTRN_THUMB2_INSN_TST1 0x0f00f010 // 0xxx1111xxxxxxxx 11110x000001xxxx TST Rn, #<const> // swapped
+#define MASK_THUMB2_INSN_TST1 0x8f00fbf0
+#define PTRN_THUMB2_INSN_TST1 0x0f00f010 /* TST Rn, #<const> */
-# define MASK_THUMB2_INSN_TST2 0x0f00fff0 // xxxx1111xxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_TST2 0x0f00ea10 // xxxx1111xxxxxxxx 111010100001xxxx TST Rn, Rm{,<shift>} // swapped
+#define MASK_THUMB2_INSN_TST2 0x0f00fff0
+#define PTRN_THUMB2_INSN_TST2 0x0f00ea10 /* TST Rn, Rm{,<shift>} */
-// Load immediate offset
-# define MASK_ARM_INSN_LIO 0x0E100000
-# define PTRN_ARM_INSN_LIO 0x04100000
+/* Load immediate offset */
+#define MASK_ARM_INSN_LIO 0x0E100000
+#define PTRN_ARM_INSN_LIO 0x04100000
-# define MASK_THUMB_INSN_LIO1 0xF800 // 11111xxxxxxxxxxx
-# define PTRN_THUMB_INSN_LIO1 0x6800 // 01101xxxxxxxxxxx LDR
+#define MASK_THUMB_INSN_LIO1 0xF800
+#define PTRN_THUMB_INSN_LIO1 0x6800 /* LDR */
-# define MASK_THUMB_INSN_LIO2 MASK_THUMB_INSN_LIO1
-# define PTRN_THUMB_INSN_LIO2 0x7800 // 01111xxxxxxxxxxx LDRB
+#define MASK_THUMB_INSN_LIO2 MASK_THUMB_INSN_LIO1
+#define PTRN_THUMB_INSN_LIO2 0x7800 /* LDRB */
-# define MASK_THUMB_INSN_LIO3 MASK_THUMB_INSN_LIO1
-# define PTRN_THUMB_INSN_LIO3 0x8800 // 10001xxxxxxxxxxx LDRH
+#define MASK_THUMB_INSN_LIO3 MASK_THUMB_INSN_LIO1
+#define PTRN_THUMB_INSN_LIO3 0x8800 /* LDRH */
-# define MASK_THUMB_INSN_LIO4 MASK_THUMB_INSN_LIO1
-# define PTRN_THUMB_INSN_LIO4 0x9800 // 10011xxxxxxxxxxx LDR SP relative
+#define MASK_THUMB_INSN_LIO4 MASK_THUMB_INSN_LIO1
+#define PTRN_THUMB_INSN_LIO4 0x9800 /* LDR SP relative */
-# define MASK_THUMB2_INSN_LDRW 0x0000fff0 // xxxxxxxxxxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_LDRW 0x0000f850 // xxxxxxxxxxxxxxxx 111110000101xxxx LDR.W Rt, [Rn, #-<imm12>]// swapped
+#define MASK_THUMB2_INSN_LDRW 0x0000fff0
+#define PTRN_THUMB2_INSN_LDRW 0x0000f850 /* LDR.W Rt, [Rn, #-<imm12>] */
-# define MASK_THUMB2_INSN_LDRW1 MASK_THUMB2_INSN_LDRW
-# define PTRN_THUMB2_INSN_LDRW1 0x0000f8d0 // xxxxxxxxxxxxxxxx 111110001101xxxx LDR.W Rt, [Rn, #<imm12>]// swapped
+#define MASK_THUMB2_INSN_LDRW1 MASK_THUMB2_INSN_LDRW
+#define PTRN_THUMB2_INSN_LDRW1 0x0000f8d0 /* LDR.W Rt, [Rn, #<imm12>] */
-# define MASK_THUMB2_INSN_LDRBW MASK_THUMB2_INSN_LDRW
-# define PTRN_THUMB2_INSN_LDRBW 0x0000f810 // xxxxxxxxxxxxxxxx 111110000001xxxx LDRB.W Rt, [Rn, #-<imm8>]// swapped
+#define MASK_THUMB2_INSN_LDRBW MASK_THUMB2_INSN_LDRW
+#define PTRN_THUMB2_INSN_LDRBW 0x0000f810 /* LDRB.W Rt, [Rn, #-<imm8>] */
-# define MASK_THUMB2_INSN_LDRBW1 MASK_THUMB2_INSN_LDRW
-# define PTRN_THUMB2_INSN_LDRBW1 0x0000f890 // xxxxxxxxxxxxxxxx 111110001001xxxx LDRB.W Rt, [Rn, #<imm12>]// swapped
+#define MASK_THUMB2_INSN_LDRBW1 MASK_THUMB2_INSN_LDRW
+#define PTRN_THUMB2_INSN_LDRBW1 0x0000f890 /* LDRB.W Rt, [Rn, #<imm12>] */
-# define MASK_THUMB2_INSN_LDRHW MASK_THUMB2_INSN_LDRW
-# define PTRN_THUMB2_INSN_LDRHW 0x0000f830 // xxxxxxxxxxxxxxxx 111110000011xxxx LDRH.W Rt, [Rn, #-<imm8>]// swapped
+#define MASK_THUMB2_INSN_LDRHW MASK_THUMB2_INSN_LDRW
+#define PTRN_THUMB2_INSN_LDRHW 0x0000f830 /* LDRH.W Rt, [Rn, #-<imm8>] */
-# define MASK_THUMB2_INSN_LDRHW1 MASK_THUMB2_INSN_LDRW
-# define PTRN_THUMB2_INSN_LDRHW1 0x0000f8b0 // xxxxxxxxxxxxxxxx 111110001011xxxx LDRH.W Rt, [Rn, #<imm12>]// swapped
+#define MASK_THUMB2_INSN_LDRHW1 MASK_THUMB2_INSN_LDRW
+#define PTRN_THUMB2_INSN_LDRHW1 0x0000f8b0 /* LDRH.W Rt, [Rn, #<imm12>] */
-# define MASK_THUMB2_INSN_LDRD 0x0000fed0 // xxxxxxxxxxxxxxxx 1111111x11x1xxxx // swapped
-# define PTRN_THUMB2_INSN_LDRD 0x0000e850 // xxxxxxxxxxxxxxxx 1110100x01x1xxxx LDRD Rt, Rt2, [Rn, #-<imm8>]// swapped
+#define MASK_THUMB2_INSN_LDRD 0x0000fed0
+#define PTRN_THUMB2_INSN_LDRD 0x0000e850 /* LDRD Rt, Rt2, [Rn, #-<imm8>] */
-# define MASK_THUMB2_INSN_LDRD1 MASK_THUMB2_INSN_LDRD
-# define PTRN_THUMB2_INSN_LDRD1 0x0000e8d0 // xxxxxxxxxxxxxxxx 1110100x11x1xxxx LDRD Rt, Rt2, [Rn, #<imm8>]// swapped
+#define MASK_THUMB2_INSN_LDRD1 MASK_THUMB2_INSN_LDRD
+#define PTRN_THUMB2_INSN_LDRD1 0x0000e8d0 /* LDRD Rt, Rt2, [Rn, #<imm8>] */
-# define MASK_THUMB2_INSN_LDRWL 0x0fc0fff0 // xxxx111111xxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_LDRWL 0x0000f850 // xxxxxxxxxxxxxxxx 111110000101xxxx LDR.W Rt, [Rn, Rm, LSL #<imm2>]// swapped
+#define MASK_THUMB2_INSN_LDRWL 0x0fc0fff0
+#define PTRN_THUMB2_INSN_LDRWL 0x0000f850 /* LDR.W Rt, [Rn,Rm,LSL #<imm2>] */
-# define MASK_THUMB2_INSN_LDREX 0x0f00ffff // xxxx1111xxxxxxxx 1111111111111111 // swapped
-# define PTRN_THUMB2_INSN_LDREX 0x0f00e85f // xxxx1111xxxxxxxx 1110100001011111 LDREX Rt, [PC, #<imm8>]// swapped
+#define MASK_THUMB2_INSN_LDREX 0x0f00ffff
+#define PTRN_THUMB2_INSN_LDREX 0x0f00e85f /* LDREX Rt, [PC, #<imm8>] */
-# define MASK_THUMB2_INSN_MUL 0xf0f0fff0 // 1111xxxx1111xxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_MUL 0xf000fb00 // 1111xxxx0000xxxx 111110110000xxxx MUL Rd, Rn, Rm// swapped
+#define MASK_THUMB2_INSN_MUL 0xf0f0fff0
+#define PTRN_THUMB2_INSN_MUL 0xf000fb00 /* MUL Rd, Rn, Rm */
-# define MASK_THUMB2_INSN_DP 0x0000ff00 // xxxxxxxxxxxxxxxx 11111111xxxxxxxx // swapped
-# define PTRN_THUMB2_INSN_DP 0x0000eb00 // xxxxxxxxxxxxxxxx 11101011xxxxxxxx // swapped ADD/SUB/SBC/...Rd, Rn, Rm{,<shift>}
+#define MASK_THUMB2_INSN_DP 0x0000ff00
+#define PTRN_THUMB2_INSN_DP 0x0000eb00 /* ADD/SUB/SBC/...Rd,Rn,Rm{,<shift>} */
-// Store immediate offset
-# define MASK_ARM_INSN_SIO MASK_ARM_INSN_LIO
-# define PTRN_ARM_INSN_SIO 0x04000000
+/* Store immediate offset */
+#define MASK_ARM_INSN_SIO MASK_ARM_INSN_LIO
+#define PTRN_ARM_INSN_SIO 0x04000000
-# define MASK_THUMB_INSN_SIO1 MASK_THUMB_INSN_LIO1
-# define PTRN_THUMB_INSN_SIO1 0x6000 // 01100xxxxxxxxxxx STR
+#define MASK_THUMB_INSN_SIO1 MASK_THUMB_INSN_LIO1
+#define PTRN_THUMB_INSN_SIO1 0x6000 /* STR */
-# define MASK_THUMB_INSN_SIO2 MASK_THUMB_INSN_LIO1
-# define PTRN_THUMB_INSN_SIO2 0x7000 // 01110xxxxxxxxxxx STRB
+#define MASK_THUMB_INSN_SIO2 MASK_THUMB_INSN_LIO1
+#define PTRN_THUMB_INSN_SIO2 0x7000 /* STRB */
-# define MASK_THUMB_INSN_SIO3 MASK_THUMB_INSN_LIO1
-# define PTRN_THUMB_INSN_SIO3 0x8000 // 10000xxxxxxxxxxx STRH
+#define MASK_THUMB_INSN_SIO3 MASK_THUMB_INSN_LIO1
+#define PTRN_THUMB_INSN_SIO3 0x8000 /* STRH */
-# define MASK_THUMB_INSN_SIO4 MASK_THUMB_INSN_LIO1
-# define PTRN_THUMB_INSN_SIO4 0x9000 // 10010xxxxxxxxxxx STR SP relative
+#define MASK_THUMB_INSN_SIO4 MASK_THUMB_INSN_LIO1
+#define PTRN_THUMB_INSN_SIO4 0x9000 /* STR SP relative */
-# define MASK_THUMB2_INSN_STRW 0x0fc0fff0 // xxxx111111xxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_STRW 0x0000f840 // xxxx000000xxxxxx 111110000100xxxx STR.W Rt, [Rn, Rm, {LSL #<imm2>}]// swapped
+#define MASK_THUMB2_INSN_STRW 0x0fc0fff0
+#define PTRN_THUMB2_INSN_STRW 0x0000f840 /* STR.W Rt,[Rn,Rm,{LSL #<imm2>}] */
-# define MASK_THUMB2_INSN_STRW1 0x0000fff0 // xxxxxxxxxxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_STRW1 0x0000f8c0 // xxxxxxxxxxxxxxxx 111110001100xxxx STR.W Rt, [Rn, #imm12]// swapped // STR.W Rt, [PC, #imm12] shall be skipped, because it hangs on Tegra. WTF
+#define MASK_THUMB2_INSN_STRW1 0x0000fff0
+#define PTRN_THUMB2_INSN_STRW1 0x0000f8c0 /* STR.W Rt, [Rn, #imm12]
+ * STR.W Rt, [PC, #imm12] shall be
+ * skipped, because it hangs
+ * on Tegra. WTF */
-# define MASK_THUMB2_INSN_STRHW MASK_THUMB2_INSN_STRW
-# define PTRN_THUMB2_INSN_STRHW 0x0000f820 // xxxx000000xxxxxx 111110000010xxxx STRH.W Rt, [Rn, Rm, {LSL #<imm2>}]// swapped
+#define MASK_THUMB2_INSN_STRHW MASK_THUMB2_INSN_STRW
+#define PTRN_THUMB2_INSN_STRHW 0x0000f820 /* STRH.W Rt,[Rn,Rm,{LSL #<imm2>}] */
-# define MASK_THUMB2_INSN_STRHW1 0x0000fff0 // xxxxxxxxxxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_STRHW1 0x0000f8a0 // xxxxxxxxxxxxxxxx 111110001010xxxx STRH.W Rt, [Rn, #<imm12>]// swapped
+#define MASK_THUMB2_INSN_STRHW1 0x0000fff0
+#define PTRN_THUMB2_INSN_STRHW1 0x0000f8a0 /* STRH.W Rt, [Rn, #<imm12>] */
-# define MASK_THUMB2_INSN_STRHT 0x0f00fff0 // xxxx1111xxxxxxxx 111111111111xxxx // swapped // strht r1, [pc, #imm] illegal instruction on Tegra. WTF
-# define PTRN_THUMB2_INSN_STRHT 0x0e00f820 // xxxx1110xxxxxxxx 111110000010xxxx STRHT Rt, [Rn, #<imm8>]// swapped
+#define MASK_THUMB2_INSN_STRHT 0x0f00fff0 /* strht r1, [pc, #imm] illegal
+ * instruction on Tegra. WTF */
+#define PTRN_THUMB2_INSN_STRHT 0x0e00f820 /* STRHT Rt, [Rn, #<imm8>] */
-# define MASK_THUMB2_INSN_STRT 0x0f00fff0 // xxxx1111xxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_STRT 0x0e00f840 // xxxx1110xxxxxxxx 111110000100xxxx STRT Rt, [Rn, #<imm8>]// swapped
+#define MASK_THUMB2_INSN_STRT 0x0f00fff0
+#define PTRN_THUMB2_INSN_STRT 0x0e00f840 /* STRT Rt, [Rn, #<imm8>] */
-# define MASK_THUMB2_INSN_STRBW MASK_THUMB2_INSN_STRW // xxxx111111xxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_STRBW 0x0000f800 // xxxx000000xxxxxx 111110000100xxxx STRB.W Rt, [Rn, Rm, {LSL #<imm2>}]// swapped
+#define MASK_THUMB2_INSN_STRBW MASK_THUMB2_INSN_STRW
+#define PTRN_THUMB2_INSN_STRBW 0x0000f800 /* STRB.W Rt,[Rn,Rm,{LSL #<imm2>}] */
-# define MASK_THUMB2_INSN_STRBW1 0x0000fff0 // xxxxxxxxxxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_STRBW1 0x0000f880 // xxxxxxxxxxxxxxxx 111110001000xxxx STRB.W Rt, [Rn, #<imm12>]// swapped // STRB.W Rt, [PC, #imm12] shall be skipped, because it hangs on Tegra. WTF
+#define MASK_THUMB2_INSN_STRBW1 0x0000fff0
+#define PTRN_THUMB2_INSN_STRBW1 0x0000f880 /* STRB.W Rt, [Rn, #<imm12>]
+ * STRB.W Rt, [PC, #imm12] shall be
+ * skipped, because it hangs
+ * on Tegra. WTF */
-# define MASK_THUMB2_INSN_STRBT 0x0f00fff0 // xxxx1111xxxxxxxx 111111111111xxxx // swapped
-# define PTRN_THUMB2_INSN_STRBT 0x0e00f800 // xxxx1110xxxxxxxx 111110000000xxxx STRBT Rt, [Rn, #<imm8>}]// swapped
+#define MASK_THUMB2_INSN_STRBT 0x0f00fff0
+#define PTRN_THUMB2_INSN_STRBT 0x0e00f800 /* STRBT Rt, [Rn, #<imm8>}] */
-# define MASK_THUMB2_INSN_STRD 0x0000fe50 // xxxxxxxxxxxxxxxx 1111111xx1x1xxxx // swapped
-# define PTRN_THUMB2_INSN_STRD 0x0000e840 // xxxxxxxxxxxxxxxx 1110100xx1x0xxxx STR{D, EX, EXB, EXH, EXD} Rt, Rt2, [Rn, #<imm8>]// swapped
+#define MASK_THUMB2_INSN_STRD 0x0000fe50
+/* STR{D,EX,EXB,EXH,EXD} Rt, Rt2, [Rn, #<imm8>] */
+#define PTRN_THUMB2_INSN_STRD 0x0000e840
-// Load register offset
-# define MASK_ARM_INSN_LRO 0x0E100010
-# define PTRN_ARM_INSN_LRO 0x06100000
+/* Load register offset */
+#define MASK_ARM_INSN_LRO 0x0E100010
+#define PTRN_ARM_INSN_LRO 0x06100000
-# define MASK_THUMB_INSN_LRO1 0xFE00 // 1111111xxxxxxxxx
-# define PTRN_THUMB_INSN_LRO1 0x5600 // 0101011xxxxxxxxx LDRSB
+#define MASK_THUMB_INSN_LRO1 0xFE00
+#define PTRN_THUMB_INSN_LRO1 0x5600 /* LDRSB */
-# define MASK_THUMB_INSN_LRO2 MASK_THUMB_INSN_LRO1
-# define PTRN_THUMB_INSN_LRO2 0x5800 // 0101100xxxxxxxxx LDR
+#define MASK_THUMB_INSN_LRO2 MASK_THUMB_INSN_LRO1
+#define PTRN_THUMB_INSN_LRO2 0x5800 /* LDR */
-# define MASK_THUMB_INSN_LRO3 0xf800 // 11111xxxxxxxxxxx
-# define PTRN_THUMB_INSN_LRO3 0x4800 // 01001xxxxxxxxxxx LDR Rd, [PC, #<imm8> * 4]
+#define MASK_THUMB_INSN_LRO3 0xf800
+#define PTRN_THUMB_INSN_LRO3 0x4800 /* LDR Rd, [PC, #<imm8> * 4] */
-# define MASK_THUMB_INSN_LRO4 MASK_THUMB_INSN_LRO1
-# define PTRN_THUMB_INSN_LRO4 0x5A00 // 0101101xxxxxxxxx LDRH
+#define MASK_THUMB_INSN_LRO4 MASK_THUMB_INSN_LRO1
+#define PTRN_THUMB_INSN_LRO4 0x5A00 /* LDRH */
-# define MASK_THUMB_INSN_LRO5 MASK_THUMB_INSN_LRO1
-# define PTRN_THUMB_INSN_LRO5 0x5C00 // 0101110xxxxxxxxx LDRB
+#define MASK_THUMB_INSN_LRO5 MASK_THUMB_INSN_LRO1
+#define PTRN_THUMB_INSN_LRO5 0x5C00 /* LDRB */
-# define MASK_THUMB_INSN_LRO6 MASK_THUMB_INSN_LRO1
-# define PTRN_THUMB_INSN_LRO6 0x5E00 // 0101111xxxxxxxxx LDRSH
+#define MASK_THUMB_INSN_LRO6 MASK_THUMB_INSN_LRO1
+#define PTRN_THUMB_INSN_LRO6 0x5E00 /* LDRSH */
-# define MASK_THUMB2_INSN_ADR 0x8000fa1f // 1xxxxxxxxxxxxxxx 11111x1xxxx11111 // swapped
-# define PTRN_THUMB2_INSN_ADR 0x0000f20f // 0xxxxxxxxxxxxxxx 11110x1xxxx01111 // swapped
+#define MASK_THUMB2_INSN_ADR 0x8000fa1f
+#define PTRN_THUMB2_INSN_ADR 0x0000f20f
-// Store register offset
-# define MASK_ARM_INSN_SRO MASK_ARM_INSN_LRO
-# define PTRN_ARM_INSN_SRO 0x06000000
+/* Store register offset */
+#define MASK_ARM_INSN_SRO MASK_ARM_INSN_LRO
+#define PTRN_ARM_INSN_SRO 0x06000000
-# define MASK_THUMB_INSN_SRO1 MASK_THUMB_INSN_LRO1
-# define PTRN_THUMB_INSN_SRO1 0x5000 // 0101000xxxxxxxxx STR
+#define MASK_THUMB_INSN_SRO1 MASK_THUMB_INSN_LRO1
+#define PTRN_THUMB_INSN_SRO1 0x5000 /* STR */
-# define MASK_THUMB_INSN_SRO2 MASK_THUMB_INSN_LRO1
-# define PTRN_THUMB_INSN_SRO2 0x5200 // 0101001xxxxxxxxx STRH
+#define MASK_THUMB_INSN_SRO2 MASK_THUMB_INSN_LRO1
+#define PTRN_THUMB_INSN_SRO2 0x5200 /* STRH */
-# define MASK_THUMB_INSN_SRO3 MASK_THUMB_INSN_LRO1
-# define PTRN_THUMB_INSN_SRO3 0x5400 // 0101010xxxxxxxxx STRB
+#define MASK_THUMB_INSN_SRO3 MASK_THUMB_INSN_LRO1
+#define PTRN_THUMB_INSN_SRO3 0x5400 /* STRB */
-// Load multiple
-# define MASK_ARM_INSN_LM 0x0E100000
-# define PTRN_ARM_INSN_LM 0x08100000
+/* Load multiple */
+#define MASK_ARM_INSN_LM 0x0E100000
+#define PTRN_ARM_INSN_LM 0x08100000
-# define MASK_THUMB2_INSN_LDMIA 0x8000ffd0 // 1xxxxxxxxxxxxxxx 1111111111x1xxxx // swapped
-# define PTRN_THUMB2_INSN_LDMIA 0x8000e890 // 1xxxxxxxxxxxxxxx 1110100010x1xxxx LDMIA(.W) Rn(!), {Rx, ..., PC}// swapped
+#define MASK_THUMB2_INSN_LDMIA 0x8000ffd0
+#define PTRN_THUMB2_INSN_LDMIA 0x8000e890 /* LDMIA(.W) Rn(!), {Rx-PC} */
-# define MASK_THUMB2_INSN_LDMDB 0x8000ffd0 // 1xxxxxxxxxxxxxxx 1111111111x1xxxx // swapped
-# define PTRN_THUMB2_INSN_LDMDB 0x8000e910 // 1xxxxxxxxxxxxxxx 1110100100x1xxxx LDMDB(.W) Rn(!), {Rx, ..., PC}// swapped
+#define MASK_THUMB2_INSN_LDMDB 0x8000ffd0
+#define PTRN_THUMB2_INSN_LDMDB 0x8000e910 /* LDMDB(.W) Rn(!), {Rx-PC} */
-// Store multiple
-# define MASK_ARM_INSN_SM MASK_ARM_INSN_LM
-# define PTRN_ARM_INSN_SM 0x08000000
+/* Store multiple */
+#define MASK_ARM_INSN_SM MASK_ARM_INSN_LM
+#define PTRN_ARM_INSN_SM 0x08000000
-// Coprocessor load/store and double register transfers
-# define MASK_ARM_INSN_CLS 0x0E000000
-# define PTRN_ARM_INSN_CLS 0x0C000000
-// Coprocessor register transfers
-# define MASK_ARM_INSN_CRT 0x0F000010
-# define PTRN_ARM_INSN_CRT 0x0E000010
+/* Coprocessor load/store and double register transfers */
+#define MASK_ARM_INSN_CLS 0x0E000000
+#define PTRN_ARM_INSN_CLS 0x0C000000
+/* Coprocessor register transfers */
+#define MASK_ARM_INSN_CRT 0x0F000010
+#define PTRN_ARM_INSN_CRT 0x0E000010
-# define ARM_INSN_MATCH(name, insn) ((insn & MASK_ARM_INSN_##name) == PTRN_ARM_INSN_##name)
-# define THUMB_INSN_MATCH(name, insn) (((insn & 0x0000FFFF) & MASK_THUMB_INSN_##name) == PTRN_THUMB_INSN_##name)
-# define THUMB2_INSN_MATCH(name, insn) ((insn & MASK_THUMB2_INSN_##name) == PTRN_THUMB2_INSN_##name)
+#define ARM_INSN_MATCH(name, insn) \
+ ((insn & MASK_ARM_INSN_##name) == PTRN_ARM_INSN_##name)
+#define THUMB_INSN_MATCH(name, insn) \
+ (((insn & 0x0000FFFF) & MASK_THUMB_INSN_##name) == \
+ PTRN_THUMB_INSN_##name)
+#define THUMB2_INSN_MATCH(name, insn) \
+ ((insn & MASK_THUMB2_INSN_##name) == PTRN_THUMB2_INSN_##name)
-# define ARM_INSN_REG_RN(insn) ((insn & 0x000F0000)>>16)
+#define ARM_INSN_REG_RN(insn) \
+ ((insn & 0x000F0000)>>16)
-# define ARM_INSN_REG_SET_RN(insn, nreg) {insn &= ~0x000F0000; insn |= nreg<<16;}
+#define ARM_INSN_REG_SET_RN(insn, nreg) \
+ { insn &= ~0x000F0000; insn |= nreg<<16; }
-# define ARM_INSN_REG_RD(insn) ((insn & 0x0000F000)>>12)
+#define ARM_INSN_REG_RD(insn) \
+ ((insn & 0x0000F000)>>12)
-# define ARM_INSN_REG_SET_RD(insn, nreg) {insn &= ~0x0000F000; insn |= nreg<<12;}
+#define ARM_INSN_REG_SET_RD(insn, nreg) \
+ { insn &= ~0x0000F000; insn |= nreg<<12; }
-# define ARM_INSN_REG_RS(insn) ((insn & 0x00000F00)>>8)
+#define ARM_INSN_REG_RS(insn) \
+ ((insn & 0x00000F00)>>8)
-# define ARM_INSN_REG_SET_RS(insn, nreg) {insn &= ~0x00000F00; insn |= nreg<<8;}
+#define ARM_INSN_REG_SET_RS(insn, nreg) \
+ { insn &= ~0x00000F00; insn |= nreg<<8; }
-# define ARM_INSN_REG_RM(insn) (insn & 0x0000000F)
+#define ARM_INSN_REG_RM(insn) \
+ (insn & 0x0000000F)
-# define ARM_INSN_REG_SET_RM(insn, nreg) {insn &= ~0x0000000F; insn |= nreg;}
+#define ARM_INSN_REG_SET_RM(insn, nreg) \
+ { insn &= ~0x0000000F; insn |= nreg; }
-# define ARM_INSN_REG_MR(insn, nreg) (insn & (1 << nreg))
+#define ARM_INSN_REG_MR(insn, nreg) \
+ (insn & (1 << nreg))
-# define ARM_INSN_REG_SET_MR(insn, nreg) {insn |= (1 << nreg);}
+#define ARM_INSN_REG_SET_MR(insn, nreg) \
+ { insn |= (1 << nreg); }
-# define ARM_INSN_REG_CLEAR_MR(insn, nreg) {insn &= ~(1 << nreg);}
+#define ARM_INSN_REG_CLEAR_MR(insn, nreg) \
+ { insn &= ~(1 << nreg); }
-# define THUMB2_INSN_REG_RT(insn) ((insn & 0xf0000000) >> 28)
-# define THUMB2_INSN_REG_RT2(insn) ((insn & 0x0f000000) >> 24)
-# define THUMB2_INSN_REG_RN(insn) (insn & 0x0000000f)
-# define THUMB2_INSN_REG_RD(insn) ((insn & 0x0f000000) >> 24)
-# define THUMB2_INSN_REG_RM(insn) ((insn & 0x000f0000) >> 16)
+#define THUMB2_INSN_REG_RT(insn) ((insn & 0xf0000000) >> 28)
+#define THUMB2_INSN_REG_RT2(insn) ((insn & 0x0f000000) >> 24)
+#define THUMB2_INSN_REG_RN(insn) (insn & 0x0000000f)
+#define THUMB2_INSN_REG_RD(insn) ((insn & 0x0f000000) >> 24)
+#define THUMB2_INSN_REG_RM(insn) ((insn & 0x000f0000) >> 16)
kprobe_opcode_t *insn;
};
-typedef kprobe_opcode_t (*entry_point_t) (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+typedef kprobe_opcode_t (*entry_point_t) (unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long);
struct undef_hook;
void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *cur_p);
void restore_previous_kprobe(struct kprobe_ctlblk *kcb);
-void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb);
+void set_current_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb);
void __naked swap_kretprobe_trampoline(void);
int swap_arch_init_kprobes(void);
void swap_arch_exit_kprobes(void);
-//void gen_insn_execbuf (void);
-//void pc_dep_insn_execbuf (void);
-//void gen_insn_execbuf_holder (void);
-//void pc_dep_insn_execbuf_holder (void);
+/* void gen_insn_execbuf (void); */
+/* void pc_dep_insn_execbuf (void); */
+/* void gen_insn_execbuf_holder (void); */
+/* void pc_dep_insn_execbuf_holder (void); */
#endif /* _SWAP_ASM_ARM_KPROBES_H */
nop
nop //original instruction
nop
- ldr pc, [pc, #4] //ssbreak
+ ldr pc, [pc, #4] //ssbreak
nop //retbreak
nop
nop //stored PC-4(next insn addr)
* @file kprobe/arch/asm-arm/trampoline_arm.h
* @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: initial implementation for ARM/MIPS
* @author Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
- * Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
- * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
- * @author Alexander Shirshikov <a.shirshikov@samsung.com>: initial implementation for Thumb
+ * Probes initial implementation;
+ * Support x86/ARM/MIPS for both user and kernel spaces.
+ * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>:
+ * redesign module for separating core and arch parts
+ * @author Alexander Shirshikov <a.shirshikov@samsung.com>:
+ * initial implementation for Thumb
*
* @section LICENSE
*
*
* Copyright (C) Samsung Electronics, 2006-2010
*
- * 2006-2007 Ekaterina Gorelkina <e.gorelkina@samsung.com>: initial implementation for ARM/MIPS
+ * 2006-2007 Ekaterina Gorelkina <e.gorelkina@samsung.com>:
+ * initial implementation for ARM/MIPS
* 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
- * Probes initial implementation; Support x86/ARM/MIPS for both user-space and kernel space.
- * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
- * 2012 Stanislav Andreev <s.andreev@samsung.com>: added time debug profiling support; BUG() message fix
+ * Probes initial implementation; Support x86/ARM/MIPS for both
+ * user-space and kernel space.
+ * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module
+ * for separating core and arch parts
+ * 2012 Stanislav Andreev <s.andreev@samsung.com>: added time debug
+ * profiling support; BUG() message fix
*/
#include "dbi_kprobes.h"
unsigned int *arr_traps_original;
-extern struct kprobe * per_cpu__current_kprobe;
-unsigned int arr_traps_template[] = { 0x3c010000, // lui a1 [0]
- 0x24210000, // addiu a1, a1 [1]
- 0x00200008, // jr a1 [2]
- 0x00000000, // nop
- 0xffffffff // end
+unsigned int arr_traps_template[] = { 0x3c010000, /* lui a1 [0] */
+ 0x24210000, /* addiu a1, a1 [1] */
+ 0x00200008, /* jr a1 [2] */
+ 0x00000000, /* nop */
+ 0xffffffff /* end */
};
-struct kprobe trampoline_p =
-{
- .addr = (kprobe_opcode_t *) & kretprobe_trampoline,
+struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)&kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
void gen_insn_execbuf(void);
-void gen_insn_execbuf_holder (void)
+void gen_insn_execbuf_holder(void)
{
- asm volatile (".global gen_insn_execbuf\n"
- "gen_insn_execbuf:\n"
- "nop\n" // original instruction
- "nop\n" //ssbreak
- "nop\n"); //retbreak
+ asm volatile(".global gen_insn_execbuf\n"
+ "gen_insn_execbuf:\n"
+ "nop\n" /* original instruction */
+ "nop\n" /* ssbreak */
+ "nop\n"); /* retbreak */
}
-int arch_check_insn (struct arch_specific_insn *ainsn)
+int arch_check_insn(struct arch_specific_insn *ainsn)
{
int ret = 0;
- switch (MIPS_INSN_OPCODE (ainsn->insn[0]))
- {
- case MIPS_BEQ_OPCODE: //B, BEQ
- case MIPS_BEQL_OPCODE: //BEQL
- case MIPS_BNE_OPCODE: //BNE
- case MIPS_BNEL_OPCODE: //BNEL
- case MIPS_BGTZ_OPCODE: //BGTZ
- case MIPS_BGTZL_OPCODE: //BGTZL
- case MIPS_BLEZ_OPCODE: //BLEZ
- case MIPS_BLEZL_OPCODE: //BLEZL
- case MIPS_J_OPCODE: //J
- case MIPS_JAL_OPCODE: //JAL
- DBPRINTF ("arch_check_insn: opcode");
+ switch (MIPS_INSN_OPCODE(ainsn->insn[0])) {
+ case MIPS_BEQ_OPCODE: /* B, BEQ */
+ case MIPS_BEQL_OPCODE: /* BEQL */
+ case MIPS_BNE_OPCODE: /* BNE */
+ case MIPS_BNEL_OPCODE: /* BNEL */
+ case MIPS_BGTZ_OPCODE: /* BGTZ */
+ case MIPS_BGTZL_OPCODE: /* BGTZL */
+ case MIPS_BLEZ_OPCODE: /* BLEZ */
+ case MIPS_BLEZL_OPCODE: /* BLEZL */
+ case MIPS_J_OPCODE: /* J */
+ case MIPS_JAL_OPCODE: /* JAL */
+ DBPRINTF("arch_check_insn: opcode");
+ ret = -EFAULT;
+ break;
+ case MIPS_REGIMM_OPCODE:
+ /* BAL, BGEZ, BGEZAL, BGEZALL, BGEZL,
+ * BLTZ, BLTZAL, BLTZALL, BLTZL */
+ switch (MIPS_INSN_RT(ainsn->insn[0])) {
+ case MIPS_BLTZ_RT:
+ case MIPS_BGEZ_RT:
+ case MIPS_BLTZL_RT:
+ case MIPS_BGEZL_RT:
+ case MIPS_BLTZAL_RT:
+ case MIPS_BGEZAL_RT:
+ case MIPS_BLTZALL_RT:
+ case MIPS_BGEZALL_RT:
+ DBPRINTF("arch_check_insn: REGIMM opcode\n");
ret = -EFAULT;
break;
- case MIPS_REGIMM_OPCODE:
- //BAL, BGEZ, BGEZAL, BGEZALL, BGEZL, BLTZ, BLTZAL, BLTZALL, BLTZL
- switch (MIPS_INSN_RT (ainsn->insn[0]))
- {
- case MIPS_BLTZ_RT:
- case MIPS_BGEZ_RT:
- case MIPS_BLTZL_RT:
- case MIPS_BGEZL_RT:
- case MIPS_BLTZAL_RT:
- case MIPS_BGEZAL_RT:
- case MIPS_BLTZALL_RT:
- case MIPS_BGEZALL_RT:
- DBPRINTF ("arch_check_insn: REGIMM opcode\n");
- ret = -EFAULT;
- break;
- }
- break;
- //BC1F, BC1FL, BC1T, BC1TL
- case MIPS_COP1_OPCODE:
- //BC2F, BC2FL, BC2T, BC2TL
- case MIPS_COP2_OPCODE:
- if (MIPS_INSN_RS (ainsn->insn[0]) == MIPS_BC_RS)
- {
- DBPRINTF ("arch_check_insn: COP1 opcode\n");
- ret = -EFAULT;
- }
- break;
- case MIPS_SPECIAL_OPCODE:
- //BREAK, JALR, JALR.HB, JR, JR.HB
- switch (MIPS_INSN_FUNC (ainsn->insn[0]))
- {
- case MIPS_JR_FUNC:
- case MIPS_JALR_FUNC:
- case MIPS_BREAK_FUNC:
- case MIPS_SYSCALL_FUNC:
- DBPRINTF ("arch_check_insn: SPECIAL opcode\n");
- ret = -EFAULT;
- break;
- }
+ }
+ break;
+ /* BC1F, BC1FL, BC1T, BC1TL */
+ case MIPS_COP1_OPCODE:
+ /* BC2F, BC2FL, BC2T, BC2TL */
+ case MIPS_COP2_OPCODE:
+ if (MIPS_INSN_RS(ainsn->insn[0]) == MIPS_BC_RS) {
+ DBPRINTF("arch_check_insn: COP1 opcode\n");
+ ret = -EFAULT;
+ }
+ break;
+ case MIPS_SPECIAL_OPCODE:
+ /* BREAK, JALR, JALR.HB, JR, JR.HB */
+ switch (MIPS_INSN_FUNC(ainsn->insn[0])) {
+ case MIPS_JR_FUNC:
+ case MIPS_JALR_FUNC:
+ case MIPS_BREAK_FUNC:
+ case MIPS_SYSCALL_FUNC:
+ DBPRINTF("arch_check_insn: SPECIAL opcode\n");
+ ret = -EFAULT;
break;
+ }
+ break;
}
return ret;
}
-int arch_prepare_kprobe (struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
{
kprobe_opcode_t insns[KPROBES_TRAMP_LEN];
int ret = 0;
- if (!ret)
- {
+ if (!ret) {
kprobe_opcode_t insn[MAX_INSN_SIZE];
struct arch_specific_insn ainsn;
/* insn: must be on special executable page on i386. */
- p->ainsn.insn = get_insn_slot (NULL, 0);
+ p->ainsn.insn = get_insn_slot(NULL, 0);
if (!p->ainsn.insn)
return -ENOMEM;
- memcpy (insn, p->addr, MAX_INSN_SIZE * sizeof (kprobe_opcode_t));
+ memcpy(insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
ainsn.insn = insn;
- ret = arch_check_insn (&ainsn);
- if (!ret)
- {
+ ret = arch_check_insn(&ainsn);
+ if (!ret) {
p->opcode = *p->addr;
p->ainsn.boostable = 0;
- memcpy (insns, gen_insn_execbuf, sizeof (insns));
+ memcpy(insns, gen_insn_execbuf, sizeof(insns));
insns[KPROBES_TRAMP_INSN_IDX] = insn[0];
- insns[KPROBES_TRAMP_SS_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
+ insns[KPROBES_TRAMP_SS_BREAK_IDX] =
+ BREAKPOINT_INSTRUCTION;
insns[KPROBES_TRAMP_RET_BREAK_IDX] = UNDEF_INSTRUCTION;
- DBPRINTF ("arch_prepare_kprobe: insn %lx", insn[0]);
- DBPRINTF ("arch_prepare_kprobe: to %p - %lx %lx %lx",
- p->ainsn.insn, insns[0], insns[1], insns[2]);
- memcpy (p->ainsn.insn, insns, sizeof(insns));
- }
- else
- {
+ DBPRINTF("arch_prepare_kprobe: insn %lx", insn[0]);
+ DBPRINTF("arch_prepare_kprobe: to %p - %lx %lx %lx",
+ p->ainsn.insn, insns[0],
+ insns[1], insns[2]);
+ memcpy(p->ainsn.insn, insns, sizeof(insns));
+ } else {
free_insn_slot(&kprobe_insn_pages, NULL, p->ainsn.insn);
}
}
return ret;
}
-int arch_prepare_uprobe (struct kprobe *p, struct task_struct *task, int atomic)
+int arch_prepare_uprobe(struct kprobe *p, struct task_struct *task, int atomic)
{
int ret = 0;
kprobe_opcode_t insns[UPROBES_TRAMP_LEN];
- if ((unsigned long) p->addr & 0x01)
- {
- DBPRINTF ("Attempt to register kprobe at an unaligned address");
+ if ((unsigned long) p->addr & 0x01) {
+ DBPRINTF("Attempt to register kprobe at an unaligned address");
ret = -EINVAL;
}
- if (!ret)
- {
+ if (!ret) {
kprobe_opcode_t insn[MAX_INSN_SIZE];
struct arch_specific_insn ainsn;
- if (!read_proc_vm_atomic (task, (unsigned long) p->addr, &insn, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
- panic ("failed to read memory %p!\n", p->addr);
+ if (!read_proc_vm_atomic(task, (unsigned long) p->addr,
+ &insn,
+ MAX_INSN_SIZE *
+ sizeof(kprobe_opcode_t)))
+ panic("failed to read memory %p!\n", p->addr);
ainsn.insn = insn;
- ret = arch_check_insn (&ainsn);
- if (!ret)
- {
+ ret = arch_check_insn(&ainsn);
+ if (!ret) {
p->opcode = insn[0];
p->ainsn.insn = get_insn_slot(task, atomic);
if (!p->ainsn.insn)
return -ENOMEM;
p->ainsn.boostable = 0;
- memcpy (insns, gen_insn_execbuf, sizeof (insns));
+ memcpy(insns, gen_insn_execbuf, sizeof(insns));
insns[UPROBES_TRAMP_INSN_IDX] = insn[0];
- insns[UPROBES_TRAMP_SS_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
+ insns[UPROBES_TRAMP_SS_BREAK_IDX] =
+ BREAKPOINT_INSTRUCTION;
insns[UPROBES_TRAMP_RET_BREAK_IDX] = UNDEF_INSTRUCTION;
- DBPRINTF ("arch_prepare_uprobe: insn %lx", insn[0]);
- DBPRINTF ("arch_prepare_uprobe: to %p - %lx %lx %lx",
- p->ainsn.insn, insns[0], insns[1], insns[2]);
-
- if (!write_proc_vm_atomic (task, (unsigned long) p->ainsn.insn, insns, sizeof (insns)))
- {
- panic("failed to write memory %p!\n", p->ainsn.insn);
- DBPRINTF ("failed to write insn slot to process memory: insn %p, addr %p, probe %p!", insn, p->ainsn.insn, p->addr);
- /*printk ("failed to write insn slot to process memory: %p/%d insn %lx, addr %p, probe %p!\n",
- task, task->pid, insn, p->ainsn.insn, p->addr);*/
- free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn);
+ DBPRINTF("arch_prepare_uprobe: insn %lx", insn[0]);
+ DBPRINTF("arch_prepare_uprobe: to %p - %lx %lx %lx",
+ p->ainsn.insn, insns[0], insns[1], insns[2]);
+
+ if (!write_proc_vm_atomic(task,
+ (unsigned long) p->ainsn.insn,
+ insns, sizeof(insns))) {
+ panic("failed to write memory %p!\n",
+ p->ainsn.insn);
+ DBPRINTF("failed to write insn slot to "
+ "process memory: insn %p, addr %p, "
+ "probe %p!",
+ insn, p->ainsn.insn, p->addr);
+ /* printk("failed to write insn slot to process
+ * memory: %p/%d insn %lx, addr %p,
+ * probe %p!\n",task, task->pid, insn,
+ * p->ainsn.insn, p->addr);*/
+ free_insn_slot(&uprobe_insn_pages, task,
+ p->ainsn.insn);
return -EINVAL;
}
}
return ret;
}
-void prepare_singlestep (struct kprobe *p, struct pt_regs *regs)
+void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
- if(p->ss_addr)
- {
- regs->cp0_epc = (unsigned long) p->ss_addr;
+ if (p->ss_addr) {
+ regs->cp0_epc = (unsigned long)p->ss_addr;
p->ss_addr = NULL;
- }
- else
- regs->cp0_epc = (unsigned long) p->ainsn.insn;
+ } else
+ regs->cp0_epc = (unsigned long)p->ainsn.insn;
}
-void save_previous_kprobe (struct kprobe_ctlblk *kcb, struct kprobe *cur_p)
+void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *cur_p)
{
- if (kcb->prev_kprobe.kp != NULL)
- {
- panic ("no space to save new probe[]: task = %d/%s, prev %d/%p, current %d/%p, new %d/%p,",
- current->pid, current->comm, kcb->prev_kprobe.kp->tgid, kcb->prev_kprobe.kp->addr,
- kprobe_running()->tgid, kprobe_running()->addr, cur_p->tgid, cur_p->addr);
+ if (kcb->prev_kprobe.kp != NULL) {
+ panic("no space to save new probe[]: task = %d/%s, prev %d/%p,"
+ " current %d/%p, new %d/%p,",
+ current->pid, current->comm, kcb->prev_kprobe.kp->tgid,
+ kcb->prev_kprobe.kp->addr, kprobe_running()->tgid,
+ kprobe_running()->addr, cur_p->tgid, cur_p->addr);
}
- kcb->prev_kprobe.kp = kprobe_running ();
+ kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
-void restore_previous_kprobe (struct kprobe_ctlblk *kcb)
+void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var (current_kprobe) = kcb->prev_kprobe.kp;
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->prev_kprobe.kp = NULL;
kcb->prev_kprobe.status = 0;
}
-void set_current_kprobe (struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+void set_current_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
- __get_cpu_var (current_kprobe) = p;
- DBPRINTF ("set_current_kprobe[]: p=%p addr=%p\n", p, p->addr);
+ __get_cpu_var(current_kprobe) = p;
+ DBPRINTF("set_current_kprobe[]: p=%p addr=%p\n", p, p->addr);
}
-int kprobe_handler (struct pt_regs *regs)
+int kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p = 0;
int ret = 0, pid = 0, retprobe = 0, reenter = 0;
/* We're in an interrupt, but this is clear and BUG()-safe. */
addr = (kprobe_opcode_t *) regs->cp0_epc;
- DBPRINTF ("regs->regs[ 31 ] = 0x%lx\n", regs->regs[31]);
+ DBPRINTF("regs->regs[ 31 ] = 0x%lx\n", regs->regs[31]);
#ifdef SUPRESS_BUG_MESSAGES
- // oops_in_progress used to avoid BUG() messages that slow down kprobe_handler() execution
+ /* oops_in_progress used to avoid BUG() messages that
+ * slow down kprobe_handler() execution */
swap_oops_in_progress = oops_in_progress;
oops_in_progress = 1;
#endif
- preempt_disable ();
+ preempt_disable();
- kcb = get_kprobe_ctlblk ();
+ kcb = get_kprobe_ctlblk();
- if (user_mode (regs))
- {
- //DBPRINTF("exception[%lu] from user mode %s/%u addr %p (%lx).", nCount, current->comm, current->pid, addr, regs->uregs[14]);
+ if (user_mode(regs)) {
+ /* DBPRINTF("exception[%lu] from user mode %s/%u addr %p(%lx).",
+ * nCount, current->comm,
+ * current->pid, addr, regs->uregs[14]); */
pid = current->tgid;
}
/* Check we're not actually recursing */
- if (kprobe_running ())
- {
- DBPRINTF ("lock???");
+ if (kprobe_running()) {
+ DBPRINTF("lock???");
p = get_kprobe(addr, pid);
- if (p)
- {
- if(!pid && (addr == (kprobe_opcode_t *)kretprobe_trampoline)){
- save_previous_kprobe (kcb, p);
+ if (p) {
+ if (!pid && (addr ==
+ (kprobe_opcode_t *)kretprobe_trampoline)) {
+ save_previous_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_REENTER;
reenter = 1;
- }
- else {
+ } else {
/* We have reentered the kprobe_handler(), since
- * another probe was hit while within the handler.
- * We here save the original kprobes variables and
- * just single step on the instruction of the new probe
- * without calling any user handlers.
+ * another probe was hit while within the
+ * handler. We here save the original kprobes
+ * variables and just single step on the
+ * instruction of the new probe without calling
+ * any user handlers.
*/
- if(!p->ainsn.boostable){
- save_previous_kprobe (kcb, p);
- set_current_kprobe (p, regs, kcb);
+ if (!p->ainsn.boostable) {
+ save_previous_kprobe(kcb, p);
+ set_current_kprobe(p, regs, kcb);
}
- kprobes_inc_nmissed_count (p);
- prepare_singlestep (p, regs);
- if(!p->ainsn.boostable)
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ if (!p->ainsn.boostable)
kcb->kprobe_status = KPROBE_REENTER;
- preempt_enable_no_resched ();
+ preempt_enable_no_resched();
#ifdef SUPRESS_BUG_MESSAGES
oops_in_progress = swap_oops_in_progress;
#endif
return 1;
}
- }
- else
- {
- if(pid) { //we can reenter probe upon uretprobe exception
- DBPRINTF ("check for UNDEF_INSTRUCTION %p\n", addr);
- // UNDEF_INSTRUCTION from user space
- p = get_kprobe_by_insn_slot (addr-UPROBES_TRAMP_RET_BREAK_IDX, pid, current);
+ } else {
+ if (pid) {
+ /* we can reenter probe upon
+ * uretprobe exception */
+ DBPRINTF("check for UNDEF_INSTRUCTION %p\n",
+ addr);
+ /* UNDEF_INSTRUCTION from user space */
+ p = get_kprobe_by_insn_slot(
+ addr-UPROBES_TRAMP_RET_BREAK_IDX,
+ pid, current);
if (p) {
- save_previous_kprobe (kcb, p);
+ save_previous_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_REENTER;
reenter = 1;
retprobe = 1;
- DBPRINTF ("uretprobe %p\n", addr);
+ DBPRINTF("uretprobe %p\n", addr);
}
}
- if(!p) {
- p = __get_cpu_var (current_kprobe);
- DBPRINTF ("kprobe_running !!! p = 0x%p p->break_handler = 0x%p", p, p->break_handler);
- /*if (p->break_handler && p->break_handler(p, regs)) {
- DBPRINTF("kprobe_running !!! goto ss");
- goto ss_probe;
- } */
- DBPRINTF ("unknown uprobe at %p cur at %p/%p\n", addr, p->addr, p->ainsn.insn);
- if(pid)
- ssaddr = p->ainsn.insn + UPROBES_TRAMP_SS_BREAK_IDX;
+ if (!p) {
+ p = __get_cpu_var(current_kprobe);
+ DBPRINTF("kprobe_running !!! p = 0x%p "
+ "p->break_handler = 0x%p", p,
+ p->break_handler);
+ /* if (p->break_handler &&
+ * p->break_handler(p, regs)) {
+ * DBPRINTF("kprobe_running !!! goto ss");
+ * goto ss_probe;
+ * } */
+ DBPRINTF("unknown uprobe at %p cur at %p/%p\n",
+ addr, p->addr, p->ainsn.insn);
+ if (pid)
+ ssaddr = p->ainsn.insn +
+ UPROBES_TRAMP_SS_BREAK_IDX;
else
- ssaddr = p->ainsn.insn + KPROBES_TRAMP_SS_BREAK_IDX;
- if (addr == ssaddr)
- {
- regs->cp0_epc = (unsigned long) (p->addr + 1);
- DBPRINTF ("finish step at %p cur at %p/%p, redirect to %lx\n", addr, p->addr, p->ainsn.insn, regs->cp0_epc);
-
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe (kcb);
- }
- else {
- reset_current_kprobe ();
+ ssaddr = p->ainsn.insn +
+ KPROBES_TRAMP_SS_BREAK_IDX;
+ if (addr == ssaddr) {
+ regs->cp0_epc =
+ (unsigned long)(p->addr + 1);
+ DBPRINTF("finish step at %p cur at "
+ "%p/%p, redirect to %lx\n",
+ addr, p->addr,
+ p->ainsn.insn, regs->cp0_epc);
+
+ if (kcb->kprobe_status ==
+ KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ } else {
+ reset_current_kprobe();
}
}
- DBPRINTF ("kprobe_running !!! goto no");
+ DBPRINTF("kprobe_running !!! goto no");
ret = 1;
- /* If it's not ours, can't be delete race, (we hold lock). */
- DBPRINTF ("no_kprobe");
+ /* If it's not ours, can't be delete race,
+ * (we hold lock). */
+ DBPRINTF("no_kprobe");
goto no_kprobe;
}
}
}
- //if(einsn != UNDEF_INSTRUCTION) {
- DBPRINTF ("get_kprobe %p-%d", addr, pid);
+ /* if(einsn != UNDEF_INSTRUCTION) { */
+ DBPRINTF("get_kprobe %p-%d", addr, pid);
if (!p)
p = get_kprobe(addr, pid);
- if (!p)
- {
- if(pid) {
- DBPRINTF ("search UNDEF_INSTRUCTION %p\n", addr);
- // UNDEF_INSTRUCTION from user space
- p = get_kprobe_by_insn_slot (addr-UPROBES_TRAMP_RET_BREAK_IDX, pid, current);
+ if (!p) {
+ if (pid) {
+ DBPRINTF("search UNDEF_INSTRUCTION %p\n", addr);
+ /* UNDEF_INSTRUCTION from user space */
+ p = get_kprobe_by_insn_slot(
+ addr-UPROBES_TRAMP_RET_BREAK_IDX, pid, current);
if (!p) {
/* Not one of ours: let kernel handle it */
- DBPRINTF ("no_kprobe");
- //printk("no_kprobe2 ret = %d\n", ret);
+ DBPRINTF("no_kprobe");
+ /* printk("no_kprobe2 ret = %d\n", ret); */
goto no_kprobe;
}
retprobe = 1;
- DBPRINTF ("uretprobe %p\n", addr);
- }
- else {
+ DBPRINTF("uretprobe %p\n", addr);
+ } else {
/* Not one of ours: let kernel handle it */
- DBPRINTF ("no_kprobe");
- //printk("no_kprobe2 ret = %d\n", ret);
+ DBPRINTF("no_kprobe");
+ /* printk("no_kprobe2 ret = %d\n", ret); */
goto no_kprobe;
}
}
- set_current_kprobe (p, regs, kcb);
- if(!reenter)
+ set_current_kprobe(p, regs, kcb);
+ if (!reenter)
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (retprobe) //(einsn == UNDEF_INSTRUCTION)
- ret = trampoline_probe_handler (p, regs);
- else if (p->pre_handler)
- {
- ret = p->pre_handler (p, regs);
- if(!p->ainsn.boostable)
+ if (retprobe) /* (einsn == UNDEF_INSTRUCTION) */
+ ret = trampoline_probe_handler(p, regs);
+ else if (p->pre_handler) {
+ ret = p->pre_handler(p, regs);
+ if (!p->ainsn.boostable)
kcb->kprobe_status = KPROBE_HIT_SS;
- else if(p->pre_handler != trampoline_probe_handler) {
+ else if (p->pre_handler != trampoline_probe_handler) {
#ifdef SUPRESS_BUG_MESSAGES
preempt_disable();
#endif
- reset_current_kprobe ();
+ reset_current_kprobe();
#ifdef SUPRESS_BUG_MESSAGES
preempt_enable_no_resched();
#endif
}
}
- if (ret)
- {
- DBPRINTF ("p->pre_handler[] 1");
+ if (ret) {
+ DBPRINTF("p->pre_handler[] 1");
#ifdef SUPRESS_BUG_MESSAGES
oops_in_progress = swap_oops_in_progress;
#endif
/* handler has already set things up, so skip ss setup */
return 1;
}
- DBPRINTF ("p->pre_handler 0");
+ DBPRINTF("p->pre_handler 0");
no_kprobe:
- preempt_enable_no_resched ();
+ preempt_enable_no_resched();
#ifdef SUPRESS_BUG_MESSAGES
oops_in_progress = swap_oops_in_progress;
#endif
DBPRINTF("patch_suspended_task_ret_addr is not implemented");
}
-int setjmp_pre_handler (struct kprobe *p, struct pt_regs *regs)
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct jprobe *jp = container_of (p, struct jprobe, kp);
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
kprobe_pre_entry_handler_t pre_entry;
entry_point_t entry;
- DBPRINTF ("pjp = 0x%p jp->entry = 0x%p", jp, jp->entry);
+ DBPRINTF("pjp = 0x%p jp->entry = 0x%p", jp, jp->entry);
entry = (entry_point_t) jp->entry;
pre_entry = (kprobe_pre_entry_handler_t) jp->pre_entry;
- //if(!entry)
- // DIE("entry NULL", regs)
- DBPRINTF ("entry = 0x%p jp->entry = 0x%p", entry, jp->entry);
-
- //call handler for all kernel probes and user space ones which belong to current tgid
- if (!p->tgid || (p->tgid == current->tgid))
- {
- if(!p->tgid && (p->addr == sched_addr) && sched_rp){
+ /* if(!entry) */
+ /* DIE("entry NULL", regs) */
+ DBPRINTF("entry = 0x%p jp->entry = 0x%p", entry, jp->entry);
+
+ /* call handler for all kernel probes and user space
+ * ones which belong to current tgid */
+ if (!p->tgid || (p->tgid == current->tgid)) {
+ if (!p->tgid && (p->addr == sched_addr) && sched_rp) {
struct task_struct *p, *g;
rcu_read_lock();
- //swapper task
- if(current != &init_task)
- patch_suspended_task_ret_addr(&init_task, sched_rp);
- // other tasks
- do_each_thread(g, p){
- if(p == current)
+ /* swapper task */
+ if (current != &init_task)
+ patch_suspended_task_ret_addr(&init_task,
+ sched_rp);
+ /* other tasks */
+ do_each_thread(g, p) {
+ if (p == current)
continue;
patch_suspended_task_ret_addr(p, sched_rp);
} while_each_thread(g, p);
rcu_read_unlock();
}
if (pre_entry)
- p->ss_addr = (void *)pre_entry (jp->priv_arg, regs);
- if (entry){
- entry (regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7], regs->regs[8], regs->regs[9]);
- }
- else {
+ p->ss_addr = (void *)pre_entry(jp->priv_arg, regs);
+ if (entry) {
+ entry(regs->regs[4], regs->regs[5], regs->regs[6],
+ regs->regs[7], regs->regs[8], regs->regs[9]);
+ } else {
if (p->tgid)
arch_ujprobe_return();
else
- dbi_jprobe_return ();
+ dbi_jprobe_return();
}
- }
- else if (p->tgid)
+ } else if (p->tgid)
arch_ujprobe_return();
- prepare_singlestep (p, regs);
+ prepare_singlestep(p, regs);
return 1;
}
-void dbi_jprobe_return (void)
+void dbi_jprobe_return(void)
{
preempt_enable_no_resched();
}
preempt_enable_no_resched();
}
-int longjmp_break_handler (struct kprobe *p, struct pt_regs *regs)
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
return 0;
}
-void arch_arm_kprobe (struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
- flush_icache_range ((unsigned long) p->addr, (unsigned long) p->addr + sizeof (kprobe_opcode_t));
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-void arch_disarm_kprobe (struct kprobe *p)
+void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
- flush_icache_range ((unsigned long) p->addr, (unsigned long) p->addr + sizeof (kprobe_opcode_t));
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-int trampoline_probe_handler (struct kprobe *p, struct pt_regs *regs)
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long) &kretprobe_trampoline;
+ unsigned long trampoline_address =
+ (unsigned long) &kretprobe_trampoline;
struct kretprobe *crp = NULL;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk ();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- DBPRINTF ("start");
+ DBPRINTF("start");
- if (p && p->tgid){
- // in case of user space retprobe trampoline is at the Nth instruction of US tramp
- trampoline_address = (unsigned long)(p->ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
+ if (p && p->tgid) {
+ /* in case of user space retprobe trampoline
+ * is at the Nth instruction of US tramp */
+ trampoline_address =
+ (unsigned long)(p->ainsn.insn +
+ UPROBES_TRAMP_RET_BREAK_IDX);
}
- INIT_HLIST_HEAD (&empty_rp);
- spin_lock_irqsave (&kretprobe_lock, flags);
- head = kretprobe_inst_table_head (current);
+ INIT_HLIST_HEAD(&empty_rp);
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ head = kretprobe_inst_table_head(current);
/*
* It is possible to have multiple instances associated with a given
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe (ri, node, tmp, head, hlist)
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist)
{
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
- if (ri->rp && ri->rp->handler){
- ri->rp->handler (ri, regs, ri->rp->priv_arg);
-
- }
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs, ri->rp->priv_arg);
orig_ret_address = (unsigned long) ri->ret_addr;
- recycle_rp_inst (ri);
+ recycle_rp_inst(ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
*/
break;
}
- kretprobe_assert (ri, orig_ret_address, trampoline_address);
- //BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
- if (trampoline_address != (unsigned long) &kretprobe_trampoline){
- if (ri->rp) BUG_ON (ri->rp->kp.tgid == 0);
- }
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ /* BUG_ON(!orig_ret_address ||
+ * (orig_ret_address == trampoline_address)); */
+ if (trampoline_address != (unsigned long) &kretprobe_trampoline) {
+ if (ri->rp)
+ BUG_ON(ri->rp->kp.tgid == 0);
+ }
+
if (ri->rp && ri->rp->kp.tgid)
- BUG_ON (trampoline_address == (unsigned long) &kretprobe_trampoline);
+ BUG_ON(trampoline_address ==
+ (unsigned long) &kretprobe_trampoline);
regs->regs[31] = orig_ret_address;
- DBPRINTF ("regs->cp0_epc = 0x%lx", regs->cp0_epc);
+ DBPRINTF("regs->cp0_epc = 0x%lx", regs->cp0_epc);
if (trampoline_address != (unsigned long) &kretprobe_trampoline)
regs->cp0_epc = orig_ret_address;
else
regs->cp0_epc = regs->cp0_epc + 4;
- DBPRINTF ("regs->cp0_epc = 0x%lx", regs->cp0_epc);
- DBPRINTF ("regs->cp0_status = 0x%lx", regs->cp0_status);
+ DBPRINTF("regs->cp0_epc = 0x%lx", regs->cp0_epc);
+ DBPRINTF("regs->cp0_status = 0x%lx", regs->cp0_status);
- if(p){ // ARM, MIPS, X86 user space
+ if (p) { /* ARM, MIPS, X86 user space */
if (kcb->kprobe_status == KPROBE_REENTER)
- restore_previous_kprobe (kcb);
+ restore_previous_kprobe(kcb);
else
- reset_current_kprobe ();
+ reset_current_kprobe();
}
- spin_unlock_irqrestore (&kretprobe_lock, flags);
- hlist_for_each_entry_safe (ri, node, tmp, &empty_rp, hlist)
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist)
{
- hlist_del (&ri->hlist);
- kfree (ri);
+ hlist_del(&ri->hlist);
+ kfree(ri);
}
- preempt_enable_no_resched ();
+ preempt_enable_no_resched();
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
struct kretprobe_instance *ri;
- DBPRINTF ("start\n");
- //TODO: test - remove retprobe after func entry but before its exit
- if ((ri = get_free_rp_inst (rp)) != NULL)
- {
+ DBPRINTF("start\n");
+ /* TODO: test - remove retprobe after func entry but before its exit */
+ ri = get_free_rp_inst(rp);
+ if (ri != NULL) {
ri->rp = rp;
ri->task = current;
ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
if (rp->kp.tgid)
- regs->regs[31] = (unsigned long) (rp->kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
+ regs->regs[31] =
+ (unsigned long)(rp->kp.ainsn.insn +
+ UPROBES_TRAMP_RET_BREAK_IDX);
else /* Replace the return addr with trampoline addr */
regs->regs[31] = (unsigned long) &kretprobe_trampoline;
- add_rp_inst (ri);
- }
- else {
- DBPRINTF ("WARNING: missed retprobe %p\n", rp->kp.addr);
+ add_rp_inst(ri);
+ } else {
+ DBPRINTF("WARNING: missed retprobe %p\n", rp->kp.addr);
rp->nmissed++;
}
}
DECLARE_MOD_CB_DEP(flush_icache_range, \
void, unsigned long __user start, unsigned long __user end);
DECLARE_MOD_CB_DEP(flush_icache_page, \
- void, struct vm_area_struct * vma, struct page * page);
+ void, struct vm_area_struct *vma, struct page *page);
DECLARE_MOD_CB_DEP(flush_cache_page, \
- void, struct vm_area_struct * vma, unsigned long page);
+ void, struct vm_area_struct *vma, unsigned long page);
int arch_init_module_deps()
{
}
-int __init arch_init_kprobes (void)
+int __init arch_init_kprobes(void)
{
unsigned int do_bp_handler;
unsigned int kprobe_handler_addr;
int ret;
- if (arch_init_module_dependencies())
- {
- DBPRINTF ("Unable to init module dependencies\n");
+ if (arch_init_module_dependencies()) {
+ DBPRINTF("Unable to init module dependencies\n");
return -1;
}
do_bp_handler = (unsigned int)swap_ksyms("do_bp");
kprobe_handler_addr = (unsigned int) &kprobe_handler;
- insns_num = sizeof (arr_traps_template) / sizeof (arr_traps_template[0]);
- code_size = insns_num * sizeof (unsigned int);
- DBPRINTF ("insns_num = %d\n", insns_num);
- // Save original code
- arr_traps_original = kmalloc (code_size, GFP_KERNEL);
- if (!arr_traps_original)
- {
- DBPRINTF ("Unable to allocate space for original code of <do_bp>!\n");
+ insns_num = sizeof(arr_traps_template) / sizeof(arr_traps_template[0]);
+ code_size = insns_num * sizeof(unsigned int);
+ DBPRINTF("insns_num = %d\n", insns_num);
+ /* Save original code */
+ arr_traps_original = kmalloc(code_size, GFP_KERNEL);
+ if (!arr_traps_original) {
+ DBPRINTF("Unable to allocate space for "
+ "original code of <do_bp>!\n");
return -1;
}
- memcpy (arr_traps_original, (void *) do_bp_handler, code_size);
+ memcpy(arr_traps_original, (void *) do_bp_handler, code_size);
- reg_hi = HIWORD (kprobe_handler_addr);
- reg_lo = LOWORD (kprobe_handler_addr);
+ reg_hi = HIWORD(kprobe_handler_addr);
+ reg_lo = LOWORD(kprobe_handler_addr);
if (reg_lo >= 0x8000)
reg_hi += 0x0001;
arr_traps_template[REG_HI_INDEX] |= reg_hi;
arr_traps_template[REG_LO_INDEX] |= reg_lo;
- // Insert new code
- memcpy ((void *) do_bp_handler, arr_traps_template, code_size);
- flush_icache_range (do_bp_handler, do_bp_handler + code_size);
- if((ret = dbi_register_kprobe (&trampoline_p)) != 0){
- //dbi_unregister_jprobe(&do_exit_p, 0);
+ /* Insert new code */
+ memcpy((void *) do_bp_handler, arr_traps_template, code_size);
+ flush_icache_range(do_bp_handler, do_bp_handler + code_size);
+ ret = dbi_register_kprobe(&trampoline_p);
+ if (ret != 0) {
+ /* dbi_unregister_jprobe(&do_exit_p, 0); */
return ret;
}
}
-void __exit dbi_arch_exit_kprobes (void)
+void __exit dbi_arch_exit_kprobes(void)
{
unsigned int do_bp_handler;
unsigned int insns_num = 0;
unsigned int code_size = 0;
- // Get instruction address
+ /* Get instruction address */
do_bp_handler = (unsigned int)swap_ksyms("do_undefinstr");
- //dbi_unregister_jprobe(&do_exit_p, 0);
+ /* dbi_unregister_jprobe(&do_exit_p, 0); */
- // Replace back the original code
+ /* Replace back the original code */
- insns_num = sizeof (arr_traps_template) / sizeof (arr_traps_template[0]);
- code_size = insns_num * sizeof (unsigned int);
- memcpy ((void *) do_bp_handler, arr_traps_original, code_size);
- flush_icache_range (do_bp_handler, do_bp_handler + code_size);
- kfree (arr_traps_original);
+ insns_num = sizeof(arr_traps_template) / sizeof(arr_traps_template[0]);
+ code_size = insns_num * sizeof(unsigned int);
+ memcpy((void *) do_bp_handler, arr_traps_original, code_size);
+ flush_icache_range(do_bp_handler, do_bp_handler + code_size);
+ kfree(arr_traps_original);
arr_traps_original = NULL;
}
-//EXPORT_SYMBOL_GPL (dbi_arch_exit_kprobes);
*
* Copyright (C) Samsung Electronics, 2006-2010
*
- * 2006-2007 Ekaterina Gorelkina <e.gorelkina@samsung.com>: initial implementation for ARM/MIPS
+ * 2006-2007 Ekaterina Gorelkina <e.gorelkina@samsung.com>:
+ * initial implementation for ARM/MIPS
* 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
- * Probes initial implementation; Support x86/ARM/MIPS for both user-space and kernel space.
- * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
+ * Probes initial implementation; Support x86/ARM/MIPS for both
+ * user-space and kernel space.
+ * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>:
+ * redesign module for separating core and arch parts
*
*/
#define MIPS_INSN_OPCODE_MASK 0xFC000000
#define MIPS_INSN_RS_MASK 0x03E00000
#define MIPS_INSN_RT_MASK 0x001F0000
-//#define MIPS_INSN_UN_MASK 0x0000FFC0
+/* #define MIPS_INSN_UN_MASK 0x0000FFC0 */
#define MIPS_INSN_FUNC_MASK 0x0000003F
#define MIPS_INSN_OPCODE(insn) (insn & MIPS_INSN_OPCODE_MASK)
#define MIPS_INSN_RS(insn) (insn & MIPS_INSN_RS_MASK)
#define MIPS_INSN_RT(insn) (insn & MIPS_INSN_RT_MASK)
#define MIPS_INSN_FUNC(insn) (insn & MIPS_INSN_FUNC_MASK)
-// opcodes 31..26
+/* opcodes 31..26 */
#define MIPS_BEQ_OPCODE 0x10000000
#define MIPS_BNE_OPCODE 0x14000000
#define MIPS_BLEZ_OPCODE 0x18000000
#define MIPS_J_OPCODE 0x08000000
#define MIPS_JAL_OPCODE 0x0C000000
#define MIPS_JALX_OPCODE 0x74000000
-// rs 25..21
+/* rs 25..21 */
#define MIPS_BC_RS 0x01000000
-// rt 20..16
+/* rt 20..16 */
#define MIPS_BLTZ_RT 0x00000000
#define MIPS_BGEZ_RT 0x00010000
#define MIPS_BLTZL_RT 0x00020000
#define MIPS_BGEZAL_RT 0x00110000
#define MIPS_BLTZALL_RT 0x00120000
#define MIPS_BGEZALL_RT 0x00130000
-// unnamed 15..6
-// function 5..0
+/* unnamed 15..6 */
+/* function 5..0 */
#define MIPS_JR_FUNC 0x00000008
#define MIPS_JALR_FUNC 0x00000009
#define MIPS_BREAK_FUNC 0x0000000D
int boostable;
};
-typedef kprobe_opcode_t (*entry_point_t) (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+typedef kprobe_opcode_t (*entry_point_t) (unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long);
-void gen_insn_execbuf_holder (void);
+void gen_insn_execbuf_holder(void);
void patch_suspended_task_ret_addr(struct task_struct *p, struct kretprobe *rp);
int arch_init_module_deps(void);
#define SUPRESS_BUG_MESSAGES /**< Debug-off definition. */
-static int (*swap_fixup_exception)(struct pt_regs * regs);
+static int (*swap_fixup_exception)(struct pt_regs *regs);
static void *(*swap_text_poke)(void *addr, const void *opcode, size_t len);
-static void (*swap_show_registers)(struct pt_regs * regs);
+static void (*swap_show_registers)(struct pt_regs *regs);
/** Stack address. */
* - When the probed function returns, this probe
* causes the handlers to fire
*/
-void swap_kretprobe_trampoline(void);
__asm(
- ".global swap_kretprobe_trampoline \n"
- "swap_kretprobe_trampoline: \n"
- "pushf \n"
+ ".global swap_kretprobe_trampoline\n"
+ "swap_kretprobe_trampoline:\n"
+ "pushf\n"
SWAP_SAVE_REGS_STRING
- "movl %esp, %eax \n"
- "call trampoline_probe_handler_x86 \n"
+ "movl %esp, %eax\n"
+ "call trampoline_probe_handler_x86\n"
/* move eflags to cs */
- "movl 56(%esp), %edx \n"
- "movl %edx, 52(%esp) \n"
+ "movl 56(%esp), %edx\n"
+ "movl %edx, 52(%esp)\n"
/* replace saved flags with true return address. */
- "movl %eax, 56(%esp) \n"
+ "movl %eax, 56(%esp)\n"
SWAP_RESTORE_REGS_STRING
- "popf \n"
- "ret \n"
+ "popf\n"
+ "ret\n"
);
/* insert a jmp code */
-static __always_inline void set_jmp_op (void *from, void *to)
+static __always_inline void set_jmp_op(void *from, void *to)
{
- struct __arch_jmp_op
- {
+ struct __arch_jmp_op {
char op;
long raddr;
- } __attribute__ ((packed)) * jop;
+ } __packed *jop;
jop = (struct __arch_jmp_op *) from;
jop->raddr = (long) (to) - ((long) (from) + 5);
jop->op = RELATIVEJUMP_INSTRUCTION;
*/
int swap_can_boost(kprobe_opcode_t *opcodes)
{
-#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
+#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
(b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
(b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
* Groups, and some special opcodes can not be boost.
*/
static const unsigned long twobyte_is_boostable[256 / 32] = {
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
- /* ------------------------------- */
- W (0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
- W (0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), /* 10 */
- W (0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
- W (0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), /* 30 */
- W (0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
- W (0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), /* 50 */
- W (0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
- W (0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1), /* 70 */
- W (0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
- W (0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), /* 90 */
- W (0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
- W (0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1), /* b0 */
- W (0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
- W (0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1), /* d0 */
- W (0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
- W (0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */
- /* ------------------------------- */
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) |
+ W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) |
+ W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
+ W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) |
+ W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1),
+ W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) |
+ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) |
+ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1),
+ W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) |
+ W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1),
+ W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) |
+ W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+
};
#undef W
kprobe_opcode_t opcode;
opcode = *(opcodes++);
/* 2nd-byte opcode */
- if (opcode == 0x0f)
- {
+ if (opcode == 0x0f) {
if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
return 0;
- return test_bit (*opcodes, twobyte_is_boostable);
+ return test_bit(*opcodes, twobyte_is_boostable);
}
- switch (opcode & 0xf0)
- {
- case 0x60:
- if (0x63 < opcode && opcode < 0x67)
- goto retry; /* prefixes */
- /* can't boost Address-size override and bound */
- return (opcode != 0x62 && opcode != 0x67);
- case 0x70:
- return 0; /* can't boost conditional jump */
- case 0xc0:
- /* can't boost software-interruptions */
- return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
- case 0xd0:
- /* can boost AA* and XLAT */
- return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
- case 0xe0:
- /* can boost in/out and absolute jmps */
- return ((opcode & 0x04) || opcode == 0xea);
- case 0xf0:
- if ((opcode & 0x0c) == 0 && opcode != 0xf1)
- goto retry; /* lock/rep(ne) prefix */
- /* clear and set flags can be boost */
- return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
- default:
- if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
- goto retry; /* prefixes */
- /* can't boost CS override and call */
- return (opcode != 0x2e && opcode != 0x9a);
+ switch (opcode & 0xf0) {
+ case 0x60:
+ if (0x63 < opcode && opcode < 0x67)
+ goto retry; /* prefixes */
+ /* can't boost Address-size override and bound */
+ return (opcode != 0x62 && opcode != 0x67);
+ case 0x70:
+ return 0; /* can't boost conditional jump */
+ case 0xc0:
+ /* can't boost software-interruptions */
+ return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
+ case 0xd0:
+ /* can boost AA* and XLAT */
+ return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
+ case 0xe0:
+ /* can boost in/out and absolute jmps */
+ return ((opcode & 0x04) || opcode == 0xea);
+ case 0xf0:
+ if ((opcode & 0x0c) == 0 && opcode != 0xf1)
+ goto retry; /* lock/rep(ne) prefix */
+ /* clear and set flags can be boost */
+ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+ default:
+ if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
+ goto retry; /* prefixes */
+ /* can't boost CS override and call */
+ return (opcode != 0x2e && opcode != 0x9a);
}
}
EXPORT_SYMBOL_GPL(swap_can_boost);
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
-static int is_IF_modifier (kprobe_opcode_t opcode)
+static int is_IF_modifier(kprobe_opcode_t opcode)
{
- switch (opcode)
- {
- case 0xfa: /* cli */
- case 0xfb: /* sti */
- case 0xcf: /* iret/iretd */
- case 0x9d: /* popf/popfd */
- return 1;
+ switch (opcode) {
+ case 0xfa: /* cli */
+ case 0xfb: /* sti */
+ case 0xcf: /* iret/iretd */
+ case 0x9d: /* popf/popfd */
+ return 1;
}
return 0;
}
* @param regs Pointer to CPU registers data.
* @return Void.
*/
-void prepare_singlestep (struct kprobe *p, struct pt_regs *regs)
+void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
int cpu = smp_processor_id();
if (p->ss_addr[cpu]) {
regs->EREG(ip) = (unsigned long)p->ss_addr[cpu];
p->ss_addr[cpu] = NULL;
- }
- else
- {
- regs->EREG (flags) |= TF_MASK;
- regs->EREG (flags) &= ~IF_MASK;
- /*single step inline if the instruction is an int3 */
- if (p->opcode == BREAKPOINT_INSTRUCTION){
- regs->EREG (ip) = (unsigned long) p->addr;
- //printk("break_insn!!!\n");
- }
- else
- regs->EREG (ip) = (unsigned long) p->ainsn.insn;
+ } else {
+ regs->EREG(flags) |= TF_MASK;
+ regs->EREG(flags) &= ~IF_MASK;
+ /* single step inline if the instruction is an int3 */
+ if (p->opcode == BREAKPOINT_INSTRUCTION) {
+ regs->EREG(ip) = (unsigned long) p->addr;
+ /* printk(KERN_INFO "break_insn!!!\n"); */
+ } else
+ regs->EREG(ip) = (unsigned long) p->ainsn.insn;
}
}
EXPORT_SYMBOL_GPL(prepare_singlestep);
* @param p_run Pointer to kprobe.
* @return Void.
*/
-void save_previous_kprobe (struct kprobe_ctlblk *kcb, struct kprobe *cur_p)
+void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *cur_p)
{
- if (kcb->prev_kprobe.kp != NULL)
- {
- panic("no space to save new probe[]: task = %d/%s, prev %p, current %p, new %p,",
- current->pid, current->comm, kcb->prev_kprobe.kp->addr,
- swap_kprobe_running()->addr, cur_p->addr);
+ if (kcb->prev_kprobe.kp != NULL) {
+ panic("no space to save new probe[]: "
+ "task = %d/%s, prev %p, current %p, new %p,",
+ current->pid, current->comm, kcb->prev_kprobe.kp->addr,
+ swap_kprobe_running()->addr, cur_p->addr);
}
* @param kcb Pointer to kprobe_ctlblk which contains previous kprobe.
* @return Void.
*/
-void restore_previous_kprobe (struct kprobe_ctlblk *kcb)
+void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(swap_current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
* @param kcb Pointer to kprobe_ctlblk.
* @return Void.
*/
-void set_current_kprobe (struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+void set_current_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
__get_cpu_var(swap_current_kprobe) = p;
- DBPRINTF ("set_current_kprobe[]: p=%p addr=%p\n", p, p->addr);
- kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags = (regs->EREG (flags) & (TF_MASK | IF_MASK));
- if (is_IF_modifier (p->opcode))
+ DBPRINTF("set_current_kprobe[]: p=%p addr=%p\n", p, p->addr);
+ kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags =
+ (regs->EREG(flags) & (TF_MASK | IF_MASK));
+ if (is_IF_modifier(p->opcode))
kcb->kprobe_saved_eflags &= ~IF_MASK;
}
return 1;
}
-#endif // !CONFIG_PREEMPT
+#endif /* !CONFIG_PREEMPT */
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
kprobe_opcode_t *addr = NULL;
struct kprobe_ctlblk *kcb;
- addr = (kprobe_opcode_t *) (regs->EREG (ip) - sizeof (kprobe_opcode_t));
+ addr = (kprobe_opcode_t *) (regs->EREG(ip) - sizeof(kprobe_opcode_t));
- preempt_disable ();
+ preempt_disable();
kcb = swap_get_kprobe_ctlblk();
p = swap_get_kprobe(addr);
/* Check we're not actually recursing */
if (swap_kprobe_running()) {
if (p) {
- if (kcb->kprobe_status == KPROBE_HIT_SS && *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->EREG(flags) &= ~TF_MASK;
regs->EREG(flags) |= kcb->kprobe_saved_eflags;
goto no_kprobe;
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
- save_previous_kprobe (kcb, p);
- set_current_kprobe (p, regs, kcb);
+ save_previous_kprobe(kcb, p);
+ set_current_kprobe(p, regs, kcb);
swap_kprobes_inc_nmissed_count(p);
- prepare_singlestep (p, regs);
+ prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
if (!p) {
/* Not one of ours: let kernel handle it */
- DBPRINTF ("no_kprobe");
+ DBPRINTF("no_kprobe");
goto no_kprobe;
}
}
- set_current_kprobe (p, regs, kcb);
+ set_current_kprobe(p, regs, kcb);
- if(!reenter)
+ if (!reenter)
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler) {
*/
int swap_setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct jprobe *jp = container_of (p, struct jprobe, kp);
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
kprobe_pre_entry_handler_t pre_entry;
entry_point_t entry;
* tailcall optimization. So, to be absolutely safe
* we also save and restore enough stack bytes to cover
* the argument area. */
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE (addr));
+ memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
+ MIN_STACK_SIZE(addr));
regs->EREG(flags) &= ~IF_MASK;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
trace_hardirqs_off();
{
struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
- asm volatile(" xchgl %%ebx,%%esp \n"
- " int3 \n"
- " .globl swap_jprobe_return_end \n"
- " swap_jprobe_return_end: \n"
- " nop \n"::"b" (kcb->jprobe_saved_esp):"memory");
+ asm volatile(" xchgl %%ebx,%%esp\n"
+ " int3\n"
+ " .globl swap_jprobe_return_end\n"
+ " swap_jprobe_return_end:\n"
+ " nop\n"
+ : : "b" (kcb->jprobe_saved_esp) : "memory");
}
EXPORT_SYMBOL_GPL(swap_jprobe_return);
*
* This function also checks instruction size for preparing direct execution.
*/
-static void resume_execution (struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+static void resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
unsigned long *tos;
unsigned long copy_eip = (unsigned long) p->ainsn.insn;
unsigned long orig_eip = (unsigned long) p->addr;
kprobe_opcode_t insns[2];
- regs->EREG (flags) &= ~TF_MASK;
+ regs->EREG(flags) &= ~TF_MASK;
tos = stack_addr(regs);
insns[0] = p->ainsn.insn[0];
insns[1] = p->ainsn.insn[1];
- switch (insns[0])
- {
- case 0x9c: /* pushfl */
- *tos &= ~(TF_MASK | IF_MASK);
- *tos |= kcb->kprobe_old_eflags;
- break;
- case 0xc2: /* iret/ret/lret */
- case 0xc3:
- case 0xca:
- case 0xcb:
- case 0xcf:
- case 0xea: /* jmp absolute -- eip is correct */
- /* eip is already adjusted, no more changes required */
- p->ainsn.boostable = 1;
- goto no_change;
- case 0xe8: /* call relative - Fix return addr */
- *tos = orig_eip + (*tos - copy_eip);
- break;
- case 0x9a: /* call absolute -- same as call absolute, indirect */
+ switch (insns[0]) {
+ case 0x9c: /* pushfl */
+ *tos &= ~(TF_MASK | IF_MASK);
+ *tos |= kcb->kprobe_old_eflags;
+ break;
+ case 0xc2: /* iret/ret/lret */
+ case 0xc3:
+ case 0xca:
+ case 0xcb:
+ case 0xcf:
+ case 0xea: /* jmp absolute -- eip is correct */
+ /* eip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
+ goto no_change;
+ case 0xe8: /* call relative - Fix return addr */
+ *tos = orig_eip + (*tos - copy_eip);
+ break;
+ case 0x9a: /* call absolute -- same as call absolute, indirect */
+ *tos = orig_eip + (*tos - copy_eip);
+ goto no_change;
+ case 0xff:
+ if ((insns[1] & 0x30) == 0x10) {
+ /*
+ * call absolute, indirect
+ * Fix return addr; eip is correct.
+ * But this is not boostable
+ */
*tos = orig_eip + (*tos - copy_eip);
goto no_change;
- case 0xff:
- if ((insns[1] & 0x30) == 0x10)
- {
- /*
- * call absolute, indirect
- * Fix return addr; eip is correct.
- * But this is not boostable
- */
- *tos = orig_eip + (*tos - copy_eip);
- goto no_change;
- }
- else if (((insns[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((insns[1] & 0x31) == 0x21))
- { /* jmp far, absolute indirect */
- /* eip is correct. And this is boostable */
- p->ainsn.boostable = 1;
- goto no_change;
- }
- default:
- break;
+ } else if (((insns[1] & 0x31) == 0x20) || /* jmp near, absolute
+ * indirect */
+ ((insns[1] & 0x31) == 0x21)) {
+ /* jmp far, absolute indirect */
+ /* eip is correct. And this is boostable */
+ p->ainsn.boostable = 1;
+ goto no_change;
+ }
+ /* fallthrough */
+ default:
+ break;
}
- if (p->ainsn.boostable == 0)
- {
- if ((regs->EREG (ip) > copy_eip) && (regs->EREG (ip) - copy_eip) + 5 < MAX_INSN_SIZE)
- {
+ if (p->ainsn.boostable == 0) {
+ if ((regs->EREG(ip) > copy_eip) &&
+ (regs->EREG(ip) - copy_eip) + 5 < MAX_INSN_SIZE) {
/*
* These instructions can be executed directly if it
* jumps back to correct address.
*/
- set_jmp_op((void *)regs->EREG(ip), (void *)orig_eip + (regs->EREG(ip) - copy_eip));
+ set_jmp_op((void *)regs->EREG(ip),
+ (void *)orig_eip +
+ (regs->EREG(ip) - copy_eip));
p->ainsn.boostable = 1;
- }
- else
- {
+ } else {
p->ainsn.boostable = -1;
}
}
- regs->EREG (ip) = orig_eip + (regs->EREG (ip) - copy_eip);
+ regs->EREG(ip) = orig_eip + (regs->EREG(ip) - copy_eip);
no_change:
return;
* Interrupts are disabled on entry as trap1 is an interrupt gate and they
* remain disabled thoroughout this function.
*/
-static int post_kprobe_handler (struct pt_regs *regs)
+static int post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = swap_kprobe_running();
struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
if (!cur)
return 0;
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler)
- {
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler (cur, regs, 0);
+ cur->post_handler(cur, regs, 0);
}
- resume_execution (cur, regs, kcb);
- regs->EREG (flags) |= kcb->kprobe_saved_eflags;
+ resume_execution(cur, regs, kcb);
+ regs->EREG(flags) |= kcb->kprobe_saved_eflags;
#ifndef CONFIG_X86
- trace_hardirqs_fixup_flags (regs->EREG (flags));
-#endif // CONFIG_X86
- /*Restore back the original saved kprobes variables and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER)
- {
- restore_previous_kprobe (kcb);
+ trace_hardirqs_fixup_flags(regs->EREG(flags));
+#endif /* CONFIG_X86 */
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
goto out;
}
swap_reset_current_kprobe();
* will have TF set, in which case, continue the remaining processing
* of do_debug, as if this is not a probe hit.
*/
- if (regs->EREG (flags) & TF_MASK)
+ if (regs->EREG(flags) & TF_MASK)
return 0;
return 1;
struct kprobe *cur = swap_kprobe_running();
struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
- switch (kcb->kprobe_status)
- {
- case KPROBE_HIT_SS:
- case KPROBE_REENTER:
- /*
- * We are here because the instruction being single
- * stepped caused a page fault. We reset the current
- * kprobe and the eip points back to the probe address
- * and allow the page fault handler to continue as a
- * normal page fault.
- */
- regs->EREG (ip) = (unsigned long) cur->addr;
- regs->EREG (flags) |= kcb->kprobe_old_eflags;
- if (kcb->kprobe_status == KPROBE_REENTER)
- restore_previous_kprobe (kcb);
- else
- swap_reset_current_kprobe();
- swap_preempt_enable_no_resched();
- break;
- case KPROBE_HIT_ACTIVE:
- case KPROBE_HIT_SSDONE:
- /*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accouting
- * these specific fault cases.
- */
- swap_kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler (cur, regs, trapnr))
- return 1;
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the eip points back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->EREG(ip) = (unsigned long) cur->addr;
+ regs->EREG(flags) |= kcb->kprobe_old_eflags;
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ swap_reset_current_kprobe();
+ swap_preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting,
+ * we can also use npre/npostfault count for accouting
+ * these specific fault cases.
+ */
+ swap_kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
- /*
- * In case the user-specified fault handler returned
- * zero, try to fix up.
- */
- if (swap_fixup_exception(regs))
- return 1;
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (swap_fixup_exception(regs))
+ return 1;
- /*
- * fixup_exception() could not handle it,
- * Let do_page_fault() fix it.
- */
- break;
- default:
- break;
+ /*
+ * fixup_exception() could not handle it,
+ * Let do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
}
return 0;
}
struct die_args *args = (struct die_args *) data;
int ret = NOTIFY_DONE;
- DBPRINTF ("val = %ld, data = 0x%X", val, (unsigned int) data);
+ DBPRINTF("val = %ld, data = 0x%X", val, (unsigned int) data);
if (args->regs == NULL || user_mode_vm(args->regs))
return ret;
- DBPRINTF ("switch (val) %lu %d %d", val, DIE_INT3, DIE_TRAP);
- switch (val)
- {
+ DBPRINTF("switch (val) %lu %d %d", val, DIE_INT3, DIE_TRAP);
+ switch (val) {
#ifdef CONFIG_KPROBES
- case DIE_INT3:
+ case DIE_INT3:
#else
- case DIE_TRAP:
+ case DIE_TRAP:
#endif
- DBPRINTF ("before kprobe_handler ret=%d %p", ret, args->regs);
- if (kprobe_handler (args->regs))
- ret = NOTIFY_STOP;
- DBPRINTF ("after kprobe_handler ret=%d %p", ret, args->regs);
- break;
- case DIE_DEBUG:
- if (post_kprobe_handler (args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_GPF:
- /* swap_kprobe_running() needs smp_processor_id() */
- preempt_disable ();
- if (swap_kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
- ret = NOTIFY_STOP;
- preempt_enable ();
- break;
- default:
- break;
+ DBPRINTF("before kprobe_handler ret=%d %p",
+ ret, args->regs);
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ DBPRINTF("after kprobe_handler ret=%d %p",
+ ret, args->regs);
+ break;
+ case DIE_DEBUG:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_GPF:
+ /* swap_kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+ if (swap_kprobe_running() &&
+ kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
}
- DBPRINTF ("ret=%d", ret);
+ DBPRINTF("ret=%d", ret);
/* if(ret == NOTIFY_STOP) */
- /* handled_exceptions++; */
+ /* handled_exceptions++; */
return ret;
}
int swap_longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
- u8 *addr = (u8 *) (regs->EREG (ip) - 1);
+ u8 *addr = (u8 *) (regs->EREG(ip) - 1);
unsigned long stack_addr = (unsigned long) (kcb->jprobe_saved_esp);
- struct jprobe *jp = container_of (p, struct jprobe, kp);
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
- DBPRINTF ("p = %p\n", p);
+ DBPRINTF("p = %p\n", p);
- if ((addr > (u8 *)swap_jprobe_return) &&
+ if ((addr > (u8 *)swap_jprobe_return) &&
(addr < (u8 *)swap_jprobe_return_end)) {
if (stack_addr(regs) != kcb->jprobe_saved_esp) {
struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
- printk("current esp %p does not match saved esp %p\n",
+ printk(KERN_INFO "current esp %p does not match saved esp %p\n",
stack_addr(regs), kcb->jprobe_saved_esp);
- printk ("Saved registers for jprobe %p\n", jp);
+ printk(KERN_INFO "Saved registers for jprobe %p\n", jp);
swap_show_registers(saved_regs);
- printk ("Current registers\n");
+ printk(KERN_INFO "Current registers\n");
swap_show_registers(regs);
panic("BUG");
- //BUG ();
+ /* BUG(); */
}
*regs = kcb->jprobe_saved_regs;
- memcpy ((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, MIN_STACK_SIZE (stack_addr));
+ memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+ MIN_STACK_SIZE(stack_addr));
swap_preempt_enable_no_resched();
return 1;
}
void kjump_trampoline(void);
void kjump_trampoline_int3(void);
__asm(
- "kjump_trampoline: \n"
- "call kjump_handler \n"
- "kjump_trampoline_int3: \n"
- "nop \n" /* for restore_regs_kp */
+ "kjump_trampoline:\n"
+ "call kjump_handler\n"
+ "kjump_trampoline_int3:\n"
+ "nop\n" /* for restore_regs_kp */
);
int set_kjump_cb(struct pt_regs *regs, jumper_cb_t cb, void *data, size_t size)
ret = swap_register_kprobe(&restore_regs_kp);
if (ret)
- printk("ERROR: kjump_init(), ret=%d\n", ret);
+ printk(KERN_INFO "ERROR: kjump_init(), ret=%d\n", ret);
return ret;
}
void jump_trampoline(void);
__asm(
- "jump_trampoline: \n"
- "pushf \n"
+ "jump_trampoline:\n"
+ "pushf\n"
SWAP_SAVE_REGS_STRING
- "movl %ebx, %eax \n" /* data --> ax */
- "call get_bx \n"
- "movl %eax, (%esp) \n" /* restore bx */
- "movl %ebx, %eax \n" /* data --> ax */
- "call jump_handler \n"
+ "movl %ebx, %eax\n" /* data --> ax */
+ "call get_bx\n"
+ "movl %eax, (%esp)\n" /* restore bx */
+ "movl %ebx, %eax\n" /* data --> ax */
+ "call jump_handler\n"
/* move flags to cs */
- "movl 56(%esp), %edx \n"
- "movl %edx, 52(%esp) \n"
+ "movl 56(%esp), %edx\n"
+ "movl %edx, 52(%esp)\n"
/* replace saved flags with true return address. */
- "movl %eax, 56(%esp) \n"
+ "movl %eax, 56(%esp)\n"
SWAP_RESTORE_REGS_STRING
"popf\n"
"ret\n"
return 0;
not_found:
- printk("ERROR: symbol %s(...) not found\n", sym);
+ printk(KERN_INFO "ERROR: symbol %s(...) not found\n", sym);
return -ESRCH;
}
void swap_arch_exit_kprobes(void)
{
kjump_exit();
- unregister_die_notifier (&kprobe_exceptions_nb);
+ unregister_die_notifier(&kprobe_exceptions_nb);
}
static inline struct pt_regs *swap_get_syscall_uregs(unsigned long sp)
{
- return NULL; //FIXME currently not implemented for x86
+ return NULL; /* FIXME currently not implemented for x86 */
}
static inline unsigned long swap_get_stack_ptr(struct pt_regs *regs)
/**
* @brief Entry point.
*/
-typedef kprobe_opcode_t (*entry_point_t) (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+typedef kprobe_opcode_t (*entry_point_t) (unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long);
int arch_init_module_deps(void);
#ifndef _SWAP_KPROBE_DEBUG_H
#define _SWAP_KPROBE_DEBUG_H
-//#define _DEBUG
+/* #define _DEBUG */
#ifdef _DEBUG
#define DBPRINTF(format, args...) do { \
- if( 1 ){ \
+ if (1) { \
char *f = __FILE__; \
char *n = strrchr(f, '/'); \
- printk("%s : %u : %s : " format "\n" , (n) ? n+1 : f, __LINE__, __FUNCTION__, ##args); \
+ printk(KERN_INFO "%s : %u : %s : " format "\n", \
+ (n) ? n+1 : f, __LINE__, __func__, ##args); \
} \
- } while(0)
+ } while (0)
#else
#define DBPRINTF(format, args...)
#endif
*/
#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#include <linux/config.h>
#endif
*/
struct slot_manager sm;
-DEFINE_PER_CPU(struct kprobe *, swap_current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe *, swap_current_kprobe);
static DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
-static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
EXPORT_SYMBOL_GPL(kprobe_count);
-static void *(*module_alloc)(unsigned long size) = NULL;
-static void *(*module_free)(struct module *mod, void *module_region) = NULL;
+static void *(*module_alloc)(unsigned long size);
+static void *(*module_free)(struct module *mod, void *module_region);
static void *__wrapper_module_alloc(unsigned long size)
{
{
if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
struct task_struct *task;
- if (ri == NULL) {
+ if (ri == NULL)
panic("kretprobe BUG!: ri = NULL\n");
- }
task = ri->task;
- if (task == NULL) {
+ if (task == NULL)
panic("kretprobe BUG!: task = NULL\n");
- }
- if (ri->rp == NULL) {
+ if (ri->rp == NULL)
panic("kretprobe BUG!: ri->rp = NULL\n");
- }
- panic("kretprobe BUG!: Processing kretprobe %p @ %p (%d/%d - %s)\n",
- ri->rp, ri->rp->kp.addr, ri->task->tgid, ri->task->pid, ri->task->comm);
+ panic("kretprobe BUG!: "
+ "Processing kretprobe %p @ %p (%d/%d - %s)\n",
+ ri->rp, ri->rp->kp.addr, ri->task->tgid,
+ ri->task->pid, ri->task->comm);
}
}
/*
* This routine is called either:
- * - under the kprobe_mutex - during kprobe_[un]register()
- * OR
- * - with preemption disabled - from arch/xxx/kernel/kprobes.c
+ * - under the kprobe_mutex - during kprobe_[un]register()
+ * OR
+ * - with preemption disabled - from arch/xxx/kernel/kprobes.c
*/
/**
struct kprobe *p;
DECLARE_NODE_PTR_FOR_HLIST(node);
- head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
+ head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
- if (p->addr == addr) {
+ if (p->addr == addr)
return p;
- }
}
return NULL;
return 0;
}
-static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
+static void aggr_post_handler(struct kprobe *p,
+ struct pt_regs *regs,
+ unsigned long flags)
{
struct kprobe *kp;
}
}
-static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+static int aggr_fault_handler(struct kprobe *p,
+ struct pt_regs *regs,
+ int trapnr)
{
struct kprobe *cur = __get_cpu_var(kprobe_instance);
{
struct kprobe *cur = __get_cpu_var(kprobe_instance);
int ret = 0;
- DBPRINTF ("cur = 0x%p\n", cur);
+ DBPRINTF("cur = 0x%p\n", cur);
if (cur)
- DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);
+ DBPRINTF("cur = 0x%p cur->break_handler = 0x%p\n",
+ cur, cur->break_handler);
if (cur && cur->break_handler) {
if (cur->break_handler(cur, regs))
/* Add rp inst onto table */
INIT_HLIST_NODE(&ri->hlist);
- hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);
+ hlist_add_head(&ri->hlist,
+ &kretprobe_inst_table[hash_ptr(ri->task,
+ KPROBE_HASH_BITS)]);
/* Also add this rp inst to the used list. */
INIT_HLIST_NODE(&ri->uflist);
static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
if (p->break_handler) {
- if (old_p->break_handler) {
+ if (old_p->break_handler)
return -EEXIST;
- }
list_add_tail_rcu(&p->list, &old_p->list);
old_p->break_handler = aggr_break_handler;
list_add_rcu(&p->list, &old_p->list);
}
- if (p->post_handler && !old_p->post_handler) {
+ if (p->post_handler && !old_p->post_handler)
old_p->post_handler = aggr_post_handler;
- }
return 0;
}
{
int ret = 0;
struct kprobe *ap;
- DBPRINTF ("start\n");
+ DBPRINTF("start\n");
- DBPRINTF ("p = %p old_p = %p \n", p, old_p);
+ DBPRINTF("p = %p old_p = %p\n", p, old_p);
if (old_p->pre_handler == aggr_pre_handler) {
- DBPRINTF ("aggr_pre_handler \n");
+ DBPRINTF("aggr_pre_handler\n");
copy_kprobe(old_p, p);
ret = add_new_kprobe(old_p, p);
} else {
- DBPRINTF ("kzalloc\n");
+ DBPRINTF("kzalloc\n");
#ifdef kzalloc
ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
#else
return -ENOMEM;
add_aggr_kprobe(ap, old_p);
copy_kprobe(ap, p);
- DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
+ DBPRINTF("ap = %p p = %p old_p = %p\n", ap, p, old_p);
ret = add_new_kprobe(ap, p);
}
if (!p->addr)
return -EINVAL;
- DBPRINTF ("p->addr = 0x%p\n", p->addr);
+ DBPRINTF("p->addr = 0x%p\n", p->addr);
p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);
- DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
+ DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);
#ifdef KPROBES_PROFILE
p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
atomic_inc(&kprobe_count);
goto out;
}
-
- if ((ret = swap_arch_prepare_kprobe(p, &sm)) != 0)
+ ret = swap_arch_prepare_kprobe(p, &sm);
+ if (ret != 0)
goto out;
- DBPRINTF ("before out ret = 0x%x\n", ret);
+ DBPRINTF("before out ret = 0x%x\n", ret);
INIT_HLIST_NODE(&p->hlist);
- hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
+ hlist_add_head_rcu(&p->hlist,
+ &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
swap_arch_arm_kprobe(p);
out:
- DBPRINTF ("out ret = 0x%x\n", ret);
+ DBPRINTF("out ret = 0x%x\n", ret);
return ret;
}
EXPORT_SYMBOL_GPL(swap_register_kprobe);
struct kprobe *old_p, *list_p;
old_p = swap_get_kprobe(kp->addr);
- if (unlikely (!old_p))
+ if (unlikely(!old_p))
return;
if (kp != old_p) {
struct kretprobe_instance *ri;
unsigned long flags = 0;
- /* TODO: consider to only swap the RA after the last pre_handler fired */
+ /* TODO: consider swapping the RA only
+ * after the last pre_handler fired */
spin_lock_irqsave(&kretprobe_lock, flags);
/* TODO: test - remove retprobe after func entry but before its exit */
- if ((ri = get_free_rp_inst(rp)) != NULL) {
+ ri = get_free_rp_inst(rp);
+ if (ri != NULL) {
ri->rp = rp;
ri->task = current;
- if (rp->entry_handler) {
+ if (rp->entry_handler)
rp->entry_handler(ri, regs);
- }
swap_arch_prepare_kretprobe(ri, regs);
/*
* We are using different hash keys (current and mm) for finding kernel
- * space and user space probes. Kernel space probes can change mm field in
- * task_struct. User space probes can be shared between threads of one
- * process so they have different current but same mm.
+ * space and user space probes. Kernel space probes can change mm field
+ * in task_struct. User space probes can be shared between threads of
+ * one process so they have different current but same mm.
*/
head = kretprobe_inst_table_head(current);
continue;
if (ri->rp && ri->rp->handler) {
__get_cpu_var(swap_current_kprobe) = &ri->rp->kp;
- swap_get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ swap_get_kprobe_ctlblk()->kprobe_status =
+ KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
__get_cpu_var(swap_current_kprobe) = NULL;
}
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
- if (kcb->kprobe_status == KPROBE_REENTER) {
+ if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
- } else {
+ else
swap_reset_current_kprobe();
- }
spin_unlock_irqrestore(&kretprobe_lock, flags);
swap_preempt_enable_no_resched();
DBPRINTF("Alloc aditional mem for retprobes");
if ((unsigned long)rp->kp.addr == sched_addr) {
- rp->maxactive += SCHED_RP_NR;//max (100, 2 * NR_CPUS);
+ rp->maxactive += SCHED_RP_NR; /* max (100, 2 * NR_CPUS); */
alloc_nodes = SCHED_RP_NR;
} else {
-#if 1//def CONFIG_PREEMPT
- rp->maxactive += max (COMMON_RP_NR, 2 * NR_CPUS);
+#if 1 /* def CONFIG_PREEMPT */
+ rp->maxactive += max(COMMON_RP_NR, 2 * NR_CPUS);
#else
rp->maxacpptive += NR_CPUS;
#endif
hlist_add_head(&inst->uflist, &rp->free_instances);
}
- DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
+ DBPRINTF("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr,
+ (unsigned long) (*(rp->kp.addr)),
+ (unsigned long) (*(rp->kp.addr + 1)),
+ (unsigned long) (*(rp->kp.addr + 2)));
return 0;
}
int ret = 0;
struct kretprobe_instance *inst;
int i;
- DBPRINTF ("START");
+ DBPRINTF("START");
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if ((unsigned long)rp->kp.addr == exit_addr) {
- rp->kp.pre_handler = NULL; //not needed for do_exit
+ rp->kp.pre_handler = NULL; /* not needed for do_exit */
rp->maxactive = 0;
} else if ((unsigned long)rp->kp.addr == do_group_exit_addr) {
rp->kp.pre_handler = NULL;
rp->kp.pre_handler = NULL;
rp->maxactive = 0;
} else if (rp->maxactive <= 0) {
-#if 1//def CONFIG_PREEMPT
- rp->maxactive = max (COMMON_RP_NR, 2 * NR_CPUS);
+#if 1 /* def CONFIG_PREEMPT */
+ rp->maxactive = max(COMMON_RP_NR, 2 * NR_CPUS);
#else
rp->maxactive = NR_CPUS;
#endif
hlist_add_head(&inst->uflist, &rp->free_instances);
}
- DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
+ DBPRINTF("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr,
+ (unsigned long) (*(rp->kp.addr)),
+ (unsigned long) (*(rp->kp.addr + 1)),
+ (unsigned long) (*(rp->kp.addr + 2)));
rp->nmissed = 0;
/* Establish function entry probe point */
- if ((ret = swap_register_kprobe(&rp->kp)) != 0)
+ ret = swap_register_kprobe(&rp->kp);
+ if (ret != 0)
free_rp_inst(rp);
- DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
+ DBPRINTF("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr,
+ (unsigned long) (*(rp->kp.addr)),
+ (unsigned long) (*(rp->kp.addr + 1)),
+ (unsigned long) (*(rp->kp.addr + 2)));
return ret;
}
swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
if (swap_disarm_krp_inst(ri) != 0) {
- printk("%s (%d/%d): cannot disarm krp instance (%08lx)\n",
- ri->task->comm, ri->task->tgid, ri->task->pid,
- (unsigned long)rp->kp.addr);
+ printk(KERN_INFO "%s (%d/%d): cannot disarm "
+ "krp instance (%08lx)\n",
+ ri->task->comm, ri->task->tgid, ri->task->pid,
+ (unsigned long)rp->kp.addr);
}
}
}
spin_lock_irqsave(&kretprobe_lock, flags);
- while ((ri = get_used_rp_inst(rp)) != NULL) {
+ while ((ri = get_used_rp_inst(rp)) != NULL)
recycle_rp_inst(ri);
- }
free_rp_inst(rp);
spin_unlock_irqrestore(&kretprobe_lock, flags);
}
EXPORT_SYMBOL_GPL(swap_unregister_kretprobe);
-static void inline rm_task_trampoline(struct task_struct *p, struct kretprobe_instance *ri)
+static inline void rm_task_trampoline(struct task_struct *p,
+ struct kretprobe_instance *ri)
{
arch_set_task_pc(p, (unsigned long)ri->ret_addr);
}
if (!sp) {
unsigned long pc = arch_get_task_pc(ri->task);
- printk("---> [%d] %s (%d/%d): pc = %08lx, ra = %08lx, tramp= %08lx (%08lx)\n",
+ printk(KERN_INFO "---> [%d] %s (%d/%d): pc = %08lx, ra = %08lx, tramp= %08lx (%08lx)\n",
task_cpu(ri->task),
ri->task->comm, ri->task->tgid, ri->task->pid,
pc, (long unsigned int)ri->ret_addr,
(long unsigned int)tramp,
- (long unsigned int)(ri->rp ? ri->rp->kp.addr: NULL));
+ (long unsigned int)(ri->rp ? ri->rp->kp.addr : NULL));
/* __switch_to retprobe handling */
if (pc == (unsigned long)tramp) {
}
if (found) {
- printk("---> [%d] %s (%d/%d): tramp (%08lx) found at %08lx (%08lx /%+d) - %p\n",
+ printk(KERN_INFO "---> [%d] %s (%d/%d): tramp (%08lx) "
+ "found at %08lx (%08lx /%+d) - %p\n",
task_cpu(ri->task),
ri->task->comm, ri->task->tgid, ri->task->pid,
(long unsigned int)tramp,
(long unsigned int)found, (long unsigned int)ri->sp,
- found - ri->sp, ri->rp ? ri->rp->kp.addr: NULL);
+ found - ri->sp, ri->rp ? ri->rp->kp.addr : NULL);
*found = (unsigned long)ri->ret_addr;
retval = 0;
} else {
- printk("---> [%d] %s (%d/%d): tramp (%08lx) NOT found at sp = %08lx - %p\n",
- task_cpu(ri->task),
- ri->task->comm, ri->task->tgid, ri->task->pid,
- (long unsigned int)tramp,
- (long unsigned int)ri->sp, ri->rp ? ri->rp->kp.addr: NULL);
+ printk(KERN_INFO "---> [%d] %s (%d/%d): tramp (%08lx) "
+ "NOT found at sp = %08lx - %p\n",
+ task_cpu(ri->task),
+ ri->task->comm, ri->task->tgid, ri->task->pid,
+ (long unsigned int)tramp,
+ (long unsigned int)ri->sp,
+ ri->rp ? ri->rp->kp.addr : NULL);
}
return retval;
sched_addr = swap_ksyms("__switch_to");
exit_addr = swap_ksyms("do_exit");
sys_exit_group_addr = swap_ksyms("sys_exit_group");
- do_group_exit_addr = swap_ksyms("do_group_exit");
- sys_exit_addr = swap_ksyms("sys_exit");
+ do_group_exit_addr = swap_ksyms("do_group_exit");
+ sys_exit_addr = swap_ksyms("sys_exit");
if (sched_addr == 0 ||
exit_addr == 0 ||
}
ret = init_module_dependencies();
- if (ret) {
+ if (ret)
return ret;
- }
return arch_init_module_deps();
}
return 0;
not_found:
- printk("ERROR: symbol '%s' not found\n", sym);
+ printk(KERN_INFO "ERROR: symbol '%s' not found\n", sym);
return -ESRCH;
}
#ifndef _SWAP_KPROBES_H
#define _SWAP_KPROBES_H
-#include <linux/version.h> // LINUX_VERSION_CODE, KERNEL_VERSION()
+#include <linux/version.h> /* LINUX_VERSION_CODE, KERNEL_VERSION() */
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
/** Invalid value */
#define INVALID_VALUE 0xFFFFFFFF
/** Invalid pointer */
-#define INVALID_POINTER (void*)INVALID_VALUE
+#define INVALID_POINTER (void *)INVALID_VALUE
/** Jprobe entry */
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
/**
* @brief Kprobe post handler pointer.
*/
-typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, unsigned long flags);
+typedef void (*kprobe_post_handler_t) (struct kprobe *,
+ struct pt_regs *,
+ unsigned long flags);
/**
* @brief Kprobe fault handler pointer.
*/
-typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, int trapnr);
+typedef int (*kprobe_fault_handler_t) (struct kprobe *,
+ struct pt_regs *,
+ int trapnr);
/**
* @brief Kretprobe handler pointer.
*/
-typedef int (*kretprobe_handler_t) (struct kretprobe_instance *, struct pt_regs *);
+typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
+ struct pt_regs *);
/**
* @struct kprobe
* @brief Main kprobe struct.
*/
-struct kprobe
-{
- struct hlist_node hlist; /**< Hash list.*/
+struct kprobe {
+ struct hlist_node hlist; /**< Hash list.*/
/** List of probes to search by instruction slot.*/
struct hlist_node is_hlist;
/** List of kprobes for multi-handler support.*/
kprobe_opcode_t opcode;
/** Copy of the original instruction.*/
struct arch_specific_insn ainsn;
- /** Override single-step target address, may be used to redirect
+ /** Override single-step target address, may be used to redirect
* control-flow to arbitrary address after probe point without
* invocation of original instruction; useful for functions
* replacement. If jprobe.entry should return address of function or
/**
* @brief Kprobe pre-entry handler pointer.
*/
-typedef unsigned long (*kprobe_pre_entry_handler_t) (void *priv_arg, struct pt_regs * regs);
+typedef unsigned long (*kprobe_pre_entry_handler_t) (void *priv_arg,
+ struct pt_regs *regs);
/**
* Because of the way compilers allocate stack space for local variables
* etc upfront, regardless of sub-scopes within a function, this mirroring
* principle currently works only for probes placed on function entry points.
- */
-struct jprobe
-{
+ */
+struct jprobe {
struct kprobe kp; /**< This probes kprobe.*/
kprobe_opcode_t *entry; /**< Probe handling code to jump to.*/
/** Handler which will be called before 'entry'. */
* @struct jprobe_instance
* @brief Jprobe instance struct.
*/
-struct jprobe_instance
-{
- // either on free list or used list
+struct jprobe_instance {
+ /* either on free list or used list */
struct hlist_node uflist; /**< Jprobes hash list. */
struct hlist_node hlist; /**< Jprobes hash list. */
struct jprobe *jp; /**< Pointer to the target jprobe. */
* @brief Function-return probe
* Note: User needs to provide a handler function, and initialize maxactive.
*/
-struct kretprobe
-{
+struct kretprobe {
struct kprobe kp; /**< Kprobe of this kretprobe.*/
kretprobe_handler_t handler; /**< Handler of this kretprobe.*/
kretprobe_handler_t entry_handler; /**< Entry handler of this kretprobe.*/
struct hlist_head used_instances;
#ifdef CONFIG_ARM
- unsigned arm_noret:1; /**< No-return flag for ARM.*/
- unsigned thumb_noret:1; /**< No-return flag for Thumb.*/
+ unsigned arm_noret:1; /**< No-return flag for ARM.*/
+ unsigned thumb_noret:1; /**< No-return flag for Thumb.*/
#endif
};
* @struct kretprobe_instance
* @brief Instance of kretprobe.
*/
-struct kretprobe_instance
-{
- // either on free list or used list
+struct kretprobe_instance {
+ /* either on free list or used list */
struct hlist_node uflist; /**< Kretprobe hash list.*/
struct hlist_node hlist; /**< Kretprobe hash list.*/
struct kretprobe *rp; /**< Pointer to this instance's kretprobe.*/
extern void swap_kprobes_inc_nmissed_count(struct kprobe *p);
-//
-// Large value for fast but memory consuming implementation
-// it is good when a lot of probes are instrumented
-//
-//#define KPROBE_HASH_BITS 6
+/*
+ * Large value for fast but memory consuming implementation
+ * it is good when a lot of probes are instrumented
+ */
+/* #define KPROBE_HASH_BITS 6 */
#define KPROBE_HASH_BITS 16
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
void swap_unregister_kretprobes_bottom(struct kretprobe **rps, size_t size);
-int swap_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task);
+int swap_disarm_urp_inst_for_task(struct task_struct *parent,
+ struct task_struct *task);
int trampoline_probe_handler (struct kprobe *p, struct pt_regs *regs);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
#ifndef is_zero_pfn
-static unsigned long swap_zero_pfn = 0;
+static unsigned long swap_zero_pfn;
#endif /* is_zero_pfn */
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)) */
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
-DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate);
+DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file,
+ unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long pgoff,
+ unsigned long *populate);
DECLARE_MOD_DEP_WRAPPER(swap_do_mmap_pgoff,
unsigned long,
struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, unsigned long pgoff,
unsigned long *populate)
-IMP_MOD_DEP_WRAPPER(do_mmap_pgoff, file, addr, len, prot, flags, pgoff, populate)
+IMP_MOD_DEP_WRAPPER(do_mmap_pgoff, file, addr, len,
+ prot, flags, pgoff, populate)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff);
+DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file,
+ unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long pgoff);
DECLARE_MOD_DEP_WRAPPER(swap_do_mmap_pgoff,
unsigned long,
struct file *file, unsigned long addr,
/* copy_to_user_page */
#ifndef copy_to_user_page
-static DECLARE_MOD_FUNC_DEP(copy_to_user_page, void, struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *dst, const void *src, unsigned long len);
+static DECLARE_MOD_FUNC_DEP(copy_to_user_page, void, struct vm_area_struct *vma,
+ struct page *page, unsigned long uaddr, void *dst,
+ const void *src, unsigned long len);
DECLARE_MOD_DEP_WRAPPER(swap_copy_to_user_page,
void,
struct vm_area_struct *vma, struct page *page,
#endif /* copy_to_user_page */
-static DECLARE_MOD_FUNC_DEP(find_extend_vma, struct vm_area_struct *, struct mm_struct * mm, unsigned long addr);
+static DECLARE_MOD_FUNC_DEP(find_extend_vma, struct vm_area_struct *,
+ struct mm_struct *mm, unsigned long addr);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
-static DECLARE_MOD_FUNC_DEP(handle_mm_fault, int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access);
+static DECLARE_MOD_FUNC_DEP(handle_mm_fault, int, struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access);
#endif
#else
-static DECLARE_MOD_FUNC_DEP(handle_mm_fault, int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags);
+static DECLARE_MOD_FUNC_DEP(handle_mm_fault, int, struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags);
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30) */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
-static DECLARE_MOD_FUNC_DEP(get_gate_vma, struct vm_area_struct *, struct mm_struct *mm);
+static DECLARE_MOD_FUNC_DEP(get_gate_vma, struct vm_area_struct *,
+ struct mm_struct *mm);
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
-static DECLARE_MOD_FUNC_DEP(get_gate_vma, struct vm_area_struct *, struct task_struct *tsk);
+static DECLARE_MOD_FUNC_DEP(get_gate_vma, struct vm_area_struct *,
+ struct task_struct *tsk);
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
#ifdef __HAVE_ARCH_GATE_AREA
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
-DECLARE_MOD_FUNC_DEP(in_gate_area, int, struct mm_struct *mm, unsigned long addr);
+DECLARE_MOD_FUNC_DEP(in_gate_area, int, struct mm_struct *mm,
+ unsigned long addr);
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
-DECLARE_MOD_FUNC_DEP(in_gate_area, int, struct task_struct *task, unsigned long addr);
+DECLARE_MOD_FUNC_DEP(in_gate_area, int, struct task_struct *task,
+ unsigned long addr);
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
#endif /* __HAVE_ARCH_GATE_AREA */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
static DECLARE_MOD_FUNC_DEP(follow_page_mask, \
- struct page *, struct vm_area_struct * vma, \
+ struct page *, struct vm_area_struct *vma, \
unsigned long address, unsigned int foll_flags, \
unsigned int *page_mask);
DECLARE_MOD_DEP_WRAPPER(swap_follow_page_mask,
struct page *,
- struct vm_area_struct * vma, unsigned long address,
+ struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags, unsigned int *page_mask)
-IMP_MOD_DEP_WRAPPER (follow_page_mask, vma, address, foll_flags, page_mask)
+IMP_MOD_DEP_WRAPPER(follow_page_mask, vma, address, foll_flags, page_mask)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
static DECLARE_MOD_FUNC_DEP(follow_page, \
- struct page *, struct vm_area_struct * vma, \
+ struct page *, struct vm_area_struct *vma, \
unsigned long address, unsigned int foll_flags);
DECLARE_MOD_DEP_WRAPPER(swap_follow_page,
struct page *,
- struct vm_area_struct * vma, unsigned long address,
+ struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags)
-IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
+IMP_MOD_DEP_WRAPPER(follow_page, vma, address, foll_flags)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
static DECLARE_MOD_FUNC_DEP(__flush_anon_page, \
void, struct task_struct *tsk);
#else
static DECLARE_MOD_FUNC_DEP(put_task_struct, \
- void, struct rcu_head * rhp);
+ void, struct rcu_head *rhp);
#endif
DECLARE_MOD_DEP_WRAPPER(swap_find_extend_vma,
struct vm_area_struct *,
- struct mm_struct * mm, unsigned long addr)
-IMP_MOD_DEP_WRAPPER (find_extend_vma, mm, addr)
+ struct mm_struct *mm, unsigned long addr)
+IMP_MOD_DEP_WRAPPER(find_extend_vma, mm, addr)
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
int,
struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access)
-IMP_MOD_DEP_WRAPPER (handle_mm_fault, mm, vma, address, write_access)
+IMP_MOD_DEP_WRAPPER(handle_mm_fault, mm, vma, address, write_access)
#endif
#else
DECLARE_MOD_DEP_WRAPPER(swap_handle_mm_fault,
int,
struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
-IMP_MOD_DEP_WRAPPER (handle_mm_fault, mm, vma, address, flags)
+IMP_MOD_DEP_WRAPPER(handle_mm_fault, mm, vma, address, flags)
#endif
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
DECLARE_MOD_DEP_WRAPPER(swap_get_gate_vma,
struct vm_area_struct *,
struct mm_struct *mm)
-IMP_MOD_DEP_WRAPPER (get_gate_vma, mm)
+IMP_MOD_DEP_WRAPPER(get_gate_vma, mm)
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
DECLARE_MOD_DEP_WRAPPER(swap_get_gate_vma,
struct vm_area_struct *,
struct task_struct *tsk)
-IMP_MOD_DEP_WRAPPER (get_gate_vma, tsk)
+IMP_MOD_DEP_WRAPPER(get_gate_vma, tsk)
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
#ifdef CONFIG_HUGETLB_PAGE
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
-DECLARE_MOD_FUNC_DEP(follow_hugetlb_page, \
- int, \
- struct mm_struct *mm, struct vm_area_struct *vma, \
- struct page **pages, struct vm_area_struct **vmas, \
- unsigned long *position, int *length, int i, \
+DECLARE_MOD_FUNC_DEP(follow_hugetlb_page, \
+ int, \
+ struct mm_struct *mm, struct vm_area_struct *vma, \
+ struct page **pages, struct vm_area_struct **vmas, \
+ unsigned long *position, int *length, int i, \
unsigned int flags);
DECLARE_MOD_DEP_WRAPPER(swap_follow_hugetlb_page,
int,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i,
unsigned int flags)
-IMP_MOD_DEP_WRAPPER(follow_hugetlb_page, \
+IMP_MOD_DEP_WRAPPER(follow_hugetlb_page, \
mm, vma, pages, vmas, position, length, i, flags)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) */
-DECLARE_MOD_FUNC_DEP(follow_hugetlb_page, \
- long, \
- struct mm_struct *mm, struct vm_area_struct *vma, \
- struct page **pages, struct vm_area_struct **vmas, \
- unsigned long *position, unsigned long *nr_pages, \
+DECLARE_MOD_FUNC_DEP(follow_hugetlb_page, \
+ long, \
+ struct mm_struct *mm, struct vm_area_struct *vma, \
+ struct page **pages, struct vm_area_struct **vmas, \
+ unsigned long *position, unsigned long *nr_pages, \
long i, unsigned int flags);
DECLARE_MOD_DEP_WRAPPER(swap_follow_hugetlb_page,
long,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, unsigned long *nr_pages,
long i, unsigned int flags)
-IMP_MOD_DEP_WRAPPER(follow_hugetlb_page, \
+IMP_MOD_DEP_WRAPPER(follow_hugetlb_page, \
mm, vma, pages, vmas, position, nr_pages, i, flags)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) */
#define swap_follow_hugetlb_page follow_hugetlb_page
#endif /* CONFIG_HUGETLB_PAGE */
-static inline int swap_in_gate_area(struct task_struct *task, unsigned long addr)
+static inline int swap_in_gate_area(struct task_struct *task,
+ unsigned long addr)
{
#ifdef __HAVE_ARCH_GATE_AREA
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
struct mm_struct *mm = task->mm;
- IMP_MOD_DEP_WRAPPER (in_gate_area, mm, addr)
+ IMP_MOD_DEP_WRAPPER(in_gate_area, mm, addr)
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
- IMP_MOD_DEP_WRAPPER (in_gate_area, task, addr)
+ IMP_MOD_DEP_WRAPPER(in_gate_area, task, addr)
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
#else /*__HAVE_ARCH_GATE_AREA */
return in_gate_area(task, addr);
void,
struct vm_area_struct *vma, struct page *page,
unsigned long vmaddr)
-IMP_MOD_DEP_WRAPPER (__flush_anon_page, vma, page, vmaddr)
+IMP_MOD_DEP_WRAPPER(__flush_anon_page, vma, page, vmaddr)
static inline void swap_flush_anon_page(struct vm_area_struct *vma,
struct page *page,
struct page *,
struct vm_area_struct *vma, unsigned long addr,
pte_t pte)
-IMP_MOD_DEP_WRAPPER (vm_normal_page, vma, addr, pte)
+IMP_MOD_DEP_WRAPPER(vm_normal_page, vma, addr, pte)
#define GUP_FLAGS_IGNORE_SIGKILL 0x8
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 38) */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
static inline int use_zero_page(struct vm_area_struct *vma)
{
/*
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+static inline int stack_guard_page(struct vm_area_struct *vma,
+ unsigned long addr)
{
return stack_guard_page_start(vma, addr) ||
stack_guard_page_end(vma, addr+PAGE_SIZE);
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (foll_flags & FOLL_NOWAIT)
- fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
+ fault_flags |=
+ (FAULT_FLAG_ALLOW_RETRY |
+ FAULT_FLAG_RETRY_NOWAIT);
ret = swap_handle_mm_fault(mm, vma, start,
fault_flags);
VM_FAULT_HWPOISON_LARGE)) {
if (i)
return i;
- else if (gup_flags & FOLL_HWPOISON)
+ else if (gup_flags &
+ FOLL_HWPOISON)
return -EHWPOISON;
else
return -EFAULT;
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
-static int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int nr_pages, unsigned int gup_flags,
- struct page **pages, struct vm_area_struct **vmas,
- int *nonblocking)
+static int __get_user_pages_uprobe(struct task_struct *tsk,
+ struct mm_struct *mm, unsigned long start,
+ int nr_pages, unsigned int gup_flags,
+ struct page **pages,
+ struct vm_area_struct **vmas,
+ int *nonblocking)
{
int i;
unsigned long vm_flags;
- if (nr_pages <= 0) {
+ if (nr_pages <= 0)
return 0;
- }
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
pte_t *pte;
/* user gate pages are read-only */
- if (gup_flags & FOLL_WRITE) {
+ if (gup_flags & FOLL_WRITE)
return i ? : -EFAULT;
- }
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
else
pud = pud_offset(pgd, pg);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, pg);
- if (pmd_none(*pmd)) {
+ if (pmd_none(*pmd))
return i ? : -EFAULT;
- }
VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map(pmd, pg);
if (pte_none(*pte)) {
* If we have a pending SIGKILL, don't keep faulting
* pages and potentially allocating memory.
*/
- if (unlikely(fatal_signal_pending(current))) {
+ if (unlikely(fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;
- }
/* cond_resched(); */
- while (!(page = swap_follow_page(vma, start, foll_flags))) {
+ while (!(page = swap_follow_page(vma, start,
+ foll_flags))) {
int ret;
unsigned int fault_flags = 0;
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (foll_flags & FOLL_NOWAIT)
- fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
+ fault_flags |=
+ (FAULT_FLAG_ALLOW_RETRY |
+ FAULT_FLAG_RETRY_NOWAIT);
ret = swap_handle_mm_fault(mm, vma, start,
fault_flags);
if (ret & VM_FAULT_ERROR) {
- if (ret & VM_FAULT_OOM) {
+ if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
- }
if (ret & (VM_FAULT_HWPOISON |
- VM_FAULT_HWPOISON_LARGE)) {
- if (i) {
+ VM_FAULT_HWPOISON_LARGE)) {
+ if (i)
return i;
- }
- else if (gup_flags & FOLL_HWPOISON) {
+ else if (gup_flags &
+ FOLL_HWPOISON)
return -EHWPOISON;
- }
- else {
+ else
return -EFAULT;
- }
}
- if (ret & VM_FAULT_SIGBUS) {
+ if (ret & VM_FAULT_SIGBUS)
return i ? i : -EFAULT;
- }
BUG();
}
/* cond_resched(); */
}
- if (IS_ERR(page)) {
+ if (IS_ERR(page))
return i ? i : PTR_ERR(page);
- }
if (pages) {
pages[i] = page;
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
-static int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
- struct page **pages, struct vm_area_struct **vmas)
+static int __get_user_pages_uprobe(struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start, int len, int flags,
+ struct page **pages,
+ struct vm_area_struct **vmas)
{
int i;
unsigned int vm_flags = 0;
vma = find_vma(mm, start);
if (!vma && swap_in_gate_area(tsk, start)) {
unsigned long pg = start & PAGE_MASK;
- struct vm_area_struct *gate_vma = swap_get_gate_vma(tsk);
+ struct vm_area_struct *gate_vma =
+ swap_get_gate_vma(tsk);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
return i ? : -EFAULT;
}
if (pages) {
- struct page *page = swap_vm_normal_page(gate_vma, start, *pte);
+ struct page *page =
+ swap_vm_normal_page(gate_vma, start,
+ *pte);
pages[i] = page;
if (page)
get_page(page);
if (pages)
foll_flags |= FOLL_GET;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
if (!write && use_zero_page(vma))
foll_flags |= FOLL_ANON;
#endif
foll_flags |= FOLL_WRITE;
- //cond_resched();
+ /* cond_resched(); */
- DBPRINTF ("pages = %p vma = %p\n", pages, vma);
- while (!(page = swap_follow_page(vma, start, foll_flags))) {
+ DBPRINTF("pages = %p vma = %p\n", pages, vma);
+ while (!(page = swap_follow_page(vma, start,
+ foll_flags))) {
int ret;
ret = swap_handle_mm_fault(mm, vma, start,
foll_flags & FOLL_WRITE);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
if (ret & VM_FAULT_WRITE)
foll_flags &= ~FOLL_WRITE;
!(vma->vm_flags & VM_WRITE))
foll_flags &= ~FOLL_WRITE;
- //cond_resched();
+ /* cond_resched(); */
#endif
}
for (step = GET_STEP_4(len); len; len -= step) {
switch (GET_STEP_4(len)) {
case 1:
- get_user(*(u8 *)(buf + pos), (unsigned long *)(addr + pos));
+ get_user(*(u8 *)(buf + pos),
+ (unsigned long *)(addr + pos));
step = 1;
break;
case 2:
case 3:
- get_user(*(u16 *)(buf + pos), (unsigned long *)(addr + pos));
+ get_user(*(u16 *)(buf + pos),
+ (unsigned long *)(addr + pos));
step = 2;
break;
case 4:
- get_user(*(u32 *)(buf + pos), (unsigned long *)(addr + pos));
+ get_user(*(u32 *)(buf + pos),
+ (unsigned long *)(addr + pos));
step = 4;
break;
}
}
}
-// not working
+/* not working */
static void write_data_current(unsigned long addr, void *buf, int len)
{
int step;
for (step = GET_STEP_4(len); len; len -= step) {
switch (GET_STEP_4(len)) {
case 1:
- put_user(*(u8 *)(buf + pos), (unsigned long *)(addr + pos));
+ put_user(*(u8 *)(buf + pos),
+ (unsigned long *)(addr + pos));
step = 1;
break;
case 2:
case 3:
- put_user(*(u16 *)(buf + pos), (unsigned long *)(addr + pos));
+ put_user(*(u16 *)(buf + pos),
+ (unsigned long *)(addr + pos));
step = 2;
break;
case 4:
- put_user(*(u32 *)(buf + pos), (unsigned long *)(addr + pos));
+ put_user(*(u32 *)(buf + pos),
+ (unsigned long *)(addr + pos));
step = 4;
break;
}
* @param write Write flag. If 0 - reading, if 1 - writing.
* @return Read-write size, error code on error.
*/
-int access_process_vm_atomic(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+int access_process_vm_atomic(struct task_struct *tsk, unsigned long addr,
+ void *buf, int len, int write)
{
struct mm_struct *mm;
struct vm_area_struct *vma;
void *old_buf = buf;
int atomic;
- if (len <= 0) {
+ if (len <= 0)
return -1;
- }
#if ACCESS_PROCESS_OPTIMIZATION
if (write == 0 && tsk == current) {
if (write) {
swap_copy_to_user_page(vma, page, addr,
- maddr + offset, buf, bytes);
+ maddr + offset,
+ buf, bytes);
set_page_dirty_lock(page);
} else {
copy_from_user_page(vma, page, addr,
- buf, maddr + offset, bytes);
+ buf, maddr + offset,
+ bytes);
}
atomic ? swap_kunmap_atomic(maddr) : kunmap(page);
return buf - old_buf;
}
+EXPORT_SYMBOL_GPL(access_process_vm_atomic);
/**
* @brief Page present.
* @param mm Pointer to the target mm_struct.
* @param address Address.
*/
-int page_present (struct mm_struct *mm, unsigned long address)
+int page_present(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pte_unmap(ptep);
if (pte_present(pte)) {
pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
+ if (pfn_valid(pfn))
return 1;
- }
}
out:
return 0;
}
-
-
-EXPORT_SYMBOL_GPL (page_present);
-EXPORT_SYMBOL_GPL (access_process_vm_atomic);
+EXPORT_SYMBOL_GPL(page_present);
#ifndef _SWAP_KPROBES_DEPS_H
#define _SWAP_KPROBES_DEPS_H
-#include <linux/version.h> // LINUX_VERSION_CODE, KERNEL_VERSION()
+#include <linux/version.h> /* LINUX_VERSION_CODE, KERNEL_VERSION() */
#include <linux/hugetlb.h>
#include <linux/mempolicy.h>
#include <linux/highmem.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
#define DECLARE_NODE_PTR_FOR_HLIST(var_name)
-#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, head, member)
-#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, n, head, member)
-#define swap_hlist_for_each_entry(tpos, pos, head, member) hlist_for_each_entry(tpos, head, member)
+#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) \
+ hlist_for_each_entry_rcu(tpos, head, member)
+#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ hlist_for_each_entry_safe(tpos, n, head, member)
+#define swap_hlist_for_each_entry(tpos, pos, head, member) \
+ hlist_for_each_entry(tpos, head, member)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
#define DECLARE_NODE_PTR_FOR_HLIST(var_name) struct hlist_node *var_name
-#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, pos, head, member)
-#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, pos, n, head, member)
-#define swap_hlist_for_each_entry(tpos, pos, head, member) hlist_for_each_entry(tpos, pos, head, member)
+#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) \
+ hlist_for_each_entry_rcu(tpos, pos, head, member)
+#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ hlist_for_each_entry_safe(tpos, pos, n, head, member)
+#define swap_hlist_for_each_entry(tpos, pos, head, member) \
+ hlist_for_each_entry(tpos, pos, head, member)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12))
#define swap_preempt_enable_no_resched() barrier()
#endif /* CONFIG_PREEMPT_COUNT */
-#else /* !(defined(MODULE) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+#else /* !(MODULE && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
#define swap_preempt_enable_no_resched() preempt_enable_no_resched()
-#endif /* !(defined(MODULE) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+#endif /* !(MODULE && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
-//--------------------- Declaration of module dependencies ------------------------//
+/* --------------------- Declaration of module dependencies ----------------- */
#define DECLARE_MOD_FUNC_DEP(name, ret, ...) ret(*__ref_##name)(__VA_ARGS__)
#define DECLARE_MOD_CB_DEP(name, ret, ...) ret(*name)(__VA_ARGS__)
-//----------------- Implementation of module dependencies wrappers -----------------//
+/* ---------------- Implementation of module dependencies wrappers ---------- */
#define DECLARE_MOD_DEP_WRAPPER(name, ret, ...) ret name(__VA_ARGS__)
#define IMP_MOD_DEP_WRAPPER(name, ...) \
}
-//---------------------- Module dependencies initialization --------------------//
+/* --------------------- Module dependencies initialization ----------------- */
#define INIT_MOD_DEP_VAR(dep, name) \
{ \
- __ref_##dep = (void *) swap_ksyms (#name); \
- if (!__ref_##dep) \
- { \
- DBPRINTF (#name " is not found! Oops. Where is it?"); \
+ __ref_##dep = (void *) swap_ksyms(#name); \
+ if (!__ref_##dep) { \
+ DBPRINTF(#name " is not found! Oops. Where is it?"); \
return -ESRCH; \
} \
}
#define INIT_MOD_DEP_CB(dep, name) \
{ \
- dep = (void *) swap_ksyms (#name); \
- if (!dep) \
- { \
- DBPRINTF (#name " is not found! Oops. Where is it?"); \
+ dep = (void *) swap_ksyms(#name); \
+ if (!dep) { \
+ DBPRINTF(#name " is not found! Oops. Where is it?"); \
return -ESRCH; \
} \
}
int init_module_dependencies(void);
-int access_process_vm_atomic(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+int access_process_vm_atomic(struct task_struct *tsk, unsigned long addr,
+ void *buf, int len, int write);
-#define read_proc_vm_atomic(tsk, addr, buf, len) access_process_vm_atomic (tsk, addr, buf, len, 0)
-#define write_proc_vm_atomic(tsk, addr, buf, len) access_process_vm_atomic (tsk, addr, buf, len, 1)
-int page_present (struct mm_struct *mm, unsigned long addr);
+#define read_proc_vm_atomic(tsk, addr, buf, len) \
+ access_process_vm_atomic(tsk, addr, buf, len, 0)
+#define write_proc_vm_atomic(tsk, addr, buf, len) \
+ access_process_vm_atomic(tsk, addr, buf, len, 1)
+int page_present(struct mm_struct *mm, unsigned long addr);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
unsigned long swap_do_mmap_pgoff(struct file *file, unsigned long addr,
* @var fixed_alloc::chunk
* Chunk.
*/
-struct fixed_alloc
-{
+struct fixed_alloc {
struct hlist_node hlist;
struct chunk chunk;
};
-static void chunk_init(struct chunk *chunk, void *data, size_t size, size_t size_block)
+static void chunk_init(struct chunk *chunk,
+ void *data,
+ size_t size,
+ size_t size_block)
{
unsigned long i;
unsigned long *p;
chunk->count_available = size / size_block;
chunk->size = chunk->count_available;
- chunk->index = kmalloc(sizeof(*chunk->index)*chunk->count_available, GFP_ATOMIC);
+ chunk->index = kmalloc(sizeof(*chunk->index)*chunk->count_available,
+ GFP_ATOMIC);
p = chunk->index;
- for (i = 0; i != chunk->count_available; ++p) {
+ for (i = 0; i != chunk->count_available; ++p)
*p = ++i;
- }
}
static void chunk_uninit(struct chunk *chunk)
kfree(chunk->index);
}
-static void* chunk_allocate(struct chunk *chunk, size_t size_block)
+static void *chunk_allocate(struct chunk *chunk, size_t size_block)
{
unsigned long *ret;
- if (!chunk->count_available) {
+ if (!chunk->count_available)
return NULL;
- }
spin_lock(&chunk->lock);
ret = chunk->data + chunk->first_available*size_block;
static inline int chunk_check_ptr(struct chunk *chunk, void *p, size_t size)
{
- if (( chunk->data <= (unsigned long *)p) &&
- ((chunk->data + size/sizeof(chunk->data)) > (unsigned long *)p)) {
+ if ((chunk->data <= (unsigned long *)p) &&
+ ((chunk->data + size/sizeof(chunk->data)) > (unsigned long *)p))
return 1;
- }
return 0;
}
struct fixed_alloc *fa;
fa = kmalloc(sizeof(*fa), GFP_ATOMIC);
- if (fa == NULL) {
+ if (fa == NULL)
return NULL;
- }
data = sm->alloc(sm);
- if(data == NULL) {
+ if (data == NULL) {
kfree(fa);
return NULL;
}
- chunk_init(&fa->chunk, data, PAGE_SIZE/sizeof(unsigned long), sm->slot_size);
+ chunk_init(&fa->chunk, data,
+ PAGE_SIZE/sizeof(unsigned long), sm->slot_size);
return fa;
}
}
fa = create_fixed_alloc(sm);
- if(fa == NULL)
+ if (fa == NULL)
return NULL;
INIT_HLIST_NODE(&fa->hlist);
* id_sys_clone,
* id_sys_clone,
*/
- id_sys_execve
+ id_sys_execve
};
static enum syscall_id id_signal[] = {
* @def CREATE_FEATURE
* Feature initialization.
*/
-#define CREATE_FEATURE(x, subtype) \
-{ \
+#define CREATE_FEATURE(x, subtype) \
+{ \
.cnt = sizeof(x) / sizeof(enum syscall_id), \
.feature_list = x, \
.sub_type = subtype, \
static int fops_cmp_func(void *key_a, void *key_b)
{
- return (key_a - key_b);
+ return key_a - key_b;
}
static inline struct map *__get_map(void)
__fops_dput(dentry);
/* it's ok if dentry is already inserted */
- return (ret == -EEXIST ? 0: ret);
+ return ret == -EEXIST ? 0 : ret;
}
static struct dentry *fops_dsearch(struct dentry *dentry)
if (check_event(task))
/* it is 'our' task: just add the dentry to the map */
- return (fops_dinsert(dentry) ? : -EAGAIN);
+ return fops_dinsert(dentry) ? : -EAGAIN;
else
/* not 'our' task: check if the file is 'interesting' */
- return (fops_dsearch(dentry) ? 0: -ESRCH);
+ return fops_dsearch(dentry) ? 0 : -ESRCH;
}
static char *fops_fpath(struct file *file, char *buf, int buflen)
filepath = fops_fpath(file, buf, PATH_LEN);
if (lock_arg_init(fprobe->id, regs, &arg) == 0) {
- subtype = (arg.type == F_UNLCK ? FOPS_LOCK_RELEASE:
- FOPS_LOCK_START);
+ subtype = (arg.type == F_UNLCK ?
+ FOPS_LOCK_RELEASE :
+ FOPS_LOCK_START);
custom_entry_event(F_ADDR(rp), regs, PT_FILE,
subtype, "Sxddxx",
if (rp && priv->dentry) {
int subtype;
if (priv->subtype == FOPS_LOCK_START)
- subtype = FOPS_LOCK_END; /* lock ret marked as lock_end */
+ subtype = FOPS_LOCK_END;
else
subtype = priv->subtype;
struct inode *inode = dentry->d_inode;
printk(FOPS_PREFIX "Releasing dentry(%p/%p/%d): %s\n",
- dentry, inode, inode ? inode->i_nlink: 0,
+ dentry, inode, inode ? inode->i_nlink : 0,
__fops_dpath(dentry, buf, PATH_LEN));
__fops_dput(dentry);
/* ====================== SWITCH_CONTEXT ======================= */
-static int switch_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+static int switch_entry_handler(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
{
if (check_event(current))
switch_entry(regs);
return 0;
}
-static int switch_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+static int switch_ret_handler(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
{
if (check_event(current))
switch_exit(regs);
};
static DEFINE_MUTEX(mutex_sc_enable);
-static int sc_enable = 0;
+static int sc_enable;
/**
* @brief Get scheduler address.
addr = swap_ksyms("__switch_to");
if (addr == 0) {
- printk("ERROR: not found '__switch_to'\n");
+ printk(KERN_INFO "ERROR: not found '__switch_to'\n");
return -EINVAL;
}
mutex_lock(&mutex_sc_enable);
if (sc_enable) {
- printk("switch context profiling is already run!\n");
+ printk(KERN_INFO "switch context profiling is already run!\n");
goto unlock;
}
mutex_lock(&mutex_sc_enable);
if (sc_enable == 0) {
- printk("switch context profiling is not running!\n");
+ printk(KERN_INFO "switch context profiling is not running!\n");
ret = -EINVAL;
goto unlock;
}
static int register_syscall(size_t id)
{
int ret;
- printk("register_syscall: %s\n", get_sys_name(id));
+ printk(KERN_INFO "register_syscall: %s\n", get_sys_name(id));
if (ksp[id].rp.kp.addr == NULL)
return 0;
static int unregister_syscall(size_t id)
{
- printk("unregister_syscall: %s\n", get_sys_name(id));
+ printk(KERN_INFO "unregister_syscall: %s\n", get_sys_name(id));
if (ksp[id].rp.kp.addr == NULL)
return 0;
ret = unregister_syscall(id_p[cnt]);
if (ret)
return ret;
- }
+ }
return ret;
}
id = f->feature_list[i];
if (get_counter(id) == 0) {
- printk("syscall %s not installed\n",
+ printk(KERN_INFO "syscall %s not installed\n",
get_sys_name(id));
kfree(id_p);
BUG();
} else {
ret = unregister_syscall(id);
if (ret)
- printk("syscall %s uninstall error, ret=%d\n",
+ printk(KERN_INFO "syscall %s uninstall error, ret=%d\n",
get_sys_name(id), ret);
}
}
if (get_counter(id) == 0) {
ret = register_syscall(id);
if (ret) {
- printk("syscall %s install error, ret=%d\n",
+ printk(KERN_INFO "syscall %s install error, ret=%d\n",
get_sys_name(id), ret);
do_uninstall_features(f, --i);
mutex_lock(&mutex_features);
if (f->enable) {
- printk("energy profiling is already run!\n");
+ printk(KERN_INFO "energy profiling is already run!\n");
ret = -EINVAL;
goto unlock;
}
mutex_lock(&mutex_features);
if (f->enable == 0) {
- printk("feature[%d] is not running!\n", feature_index(f));
+ printk(KERN_INFO "feature[%d] is not running!\n",
+ feature_index(f));
ret = -EINVAL;
goto unlock;
}
break;
default:
f = get_feature(id);
- ret = f ? install_features(f): -EINVAL;
+ ret = f ? install_features(f) : -EINVAL;
break;
}
break;
default:
f = get_feature(id);
- ret = f ? uninstall_features(f): -EINVAL;
+ ret = f ? uninstall_features(f) : -EINVAL;
break;
}
name = get_sys_name(i);
addr = swap_ksyms(name);
if (addr == 0) {
- printk("INFO: %s() not found\n", name);
+ printk(KERN_INFO "INFO: %s() not found\n", name);
} else if (ni_syscall == addr) {
- printk("INFO: %s is not install\n", name);
+ printk(KERN_INFO "INFO: %s is not install\n", name);
addr = 0;
}
{
size_t i;
- for (i = 0; i < f->cnt; ++i) {
- printk(" feature[%3u]: %s\n", i, get_sys_name(f->feature_list[i]));
- }
+ for (i = 0; i < f->cnt; ++i)
+		printk(KERN_INFO "	feature[%3zu]: %s\n", i,
+ get_sys_name(f->feature_list[i]));
}
/**
{
int i;
- printk("print_features:\n");
+ printk(KERN_INFO "print_features:\n");
for (i = 0; i < feature_cnt; ++i) {
- printk("feature: %d\n", i);
+ printk(KERN_INFO "feature: %d\n", i);
print_feature(&features[i]);
}
}
{
int i;
- printk("SYSCALL:\n");
- for (i = 0; i < syscall_name_cnt; ++i) {
- printk(" [%2d] %s\n", get_counter(i), get_sys_name(i));
- }
+ printk(KERN_INFO "SYSCALL:\n");
+ for (i = 0; i < syscall_name_cnt; ++i)
+ printk(KERN_INFO " [%2d] %s\n",
+ get_counter(i), get_sys_name(i));
}
/* debug */
{
struct entry *entry = __search(map, key);
- return (entry ? entry_data(entry): NULL);
+ return entry ? entry_data(entry) : NULL;
}
static void *__remove(struct map *map, struct entry *entry)
struct entry *entry = __search(map, key);
/* Removes entry from the tree but does not free the data */
- return (entry ? __remove(map, entry): NULL);
+ return entry ? __remove(map, entry) : NULL;
}
static void *__replace(struct map *map, struct entry *old, struct entry *new)
X(sys_fchownat, dsddd), \
X(sys_fgetxattr, dspx), \
X(sys_flistxattr, dpx), \
- X(sys_fork, ), \
+ X(sys_fork,), \
X(sys_fremovexattr, ds), \
X(sys_fstat64, xp), \
X(sys_ftruncate64, dx), \
X(sys_getsockopt, dddpd), \
X(sys_getxattr, sspx), \
X(sys_inotify_add_watch, dsd), \
- X(sys_inotify_init, ), \
+ X(sys_inotify_init,), \
X(sys_inotify_init1, d), \
X(sys_inotify_rm_watch, dd), \
X(sys_ipc, ddxxpx), \
X(sys_openat, dsdd), \
X(sys_open_by_handle_at, dpd), \
X(sys_open, sdd), \
- X(sys_pause, ), \
+ X(sys_pause,), \
X(sys_pipe2, dd), \
X(sys_ppoll, pdpp), \
X(sys_pread64, dpxx), \
X(sys_utimensat, dspd), \
X(sys_utime, pp), \
X(sys_utimes, pp), \
- X(sys_vfork, ), \
+ X(sys_vfork,), \
X(sys_vmsplice, dpxd), \
X(sys_wait4, dddp), \
X(sys_waitid, ddpdp)
static int __init init_ks_manager(void)
{
- return 0;
+ return 0;
}
static void __exit exit_ks_manager(void)
module_init(init_ks_manager);
module_exit(exit_ks_manager);
-MODULE_LICENSE ("GPL");
+MODULE_LICENSE("GPL");
{
struct symbol_data *sym_data_p = (struct symbol_data *)data;
- /* We expect that real symbol name should have at least the same length as
- * symbol name we are looking for. */
+ /* We expect that real symbol name should have at least the same
+ * length as symbol name we are looking for. */
if (strncmp(sym_data_p->name, sym, sym_data_p->len) == 0) {
sym_data_p->addr = addr;
/* Return != 0 to stop loop over the symbols */
{
int ret = ksyms_init();
- printk("SWAP_KSYMS: Module initialized\n");
+ printk(KERN_INFO "SWAP_KSYMS: Module initialized\n");
return ret;
}
{
ksyms_exit();
- printk("SWAP_KSYMS: Module uninitialized\n");
+ printk(KERN_INFO "SWAP_KSYMS: Module uninitialized\n");
}
module_init(swap_ksyms_init);
do { \
char *f = __FILE__; \
char *n = strrchr(f, '/'); \
- printk("%s:%u \'%s\' ERROR: " format "\n" , (n) ? n+1 : f, __LINE__, __FUNCTION__, ##args); \
- } while(0)
+		printk(KERN_INFO "%s:%u \'%s\' ERROR: " format "\n", \
+ (n) ? n+1 : f, __LINE__, __func__, ##args); \
+ } while (0)
/**
* @struct sys_map_item
char *name;
};
-static char* sm_path = NULL;
+static char *sm_path;
module_param(sm_path, charp, 0);
/**
* List of sys_map_item.
*/
LIST_HEAD(smi_list);
-static struct file *file = NULL;
+static struct file *file;
-static int cnt_init_sm = 0;
+static int cnt_init_sm;
/**
* @var cnt_init_sm_lock
file = NULL;
if (ret) {
- KSYMS_ERR("while closing file \'%s\' err=%d", sm_path, ret);
+ KSYMS_ERR("while closing file \'%s\' err=%d",
+ sm_path, ret);
}
}
}
static int file_check(void)
{
int ret = file_open();
- if (ret == 0) {
+ if (ret == 0)
file_close();
- }
return ret;
}
static long file_size(struct file *file)
{
struct kstat st;
- if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st)) {
+ if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
return -1;
- }
- if (!S_ISREG(st.mode)) {
+ if (!S_ISREG(st.mode))
return -1;
- }
- if (st.size != (long)st.size) {
+ if (st.size != (long)st.size)
return -1;
- }
return st.size;
}
kfree(line);
- if (is_symbol_attr(attr)) {
+ if (is_symbol_attr(attr))
smi = create_smi(addr, name);
- }
return smi;
}
for (c = start; c < end; ++c) {
if (is_endline(*c)) {
smi = get_sys_map_item(start, c);
- if (smi) {
+ if (smi)
add_smi(smi);
- }
for (start = c; c < end; ++c) {
if (!is_endline(*c)) {
long size;
int ret = file_open();
- if (ret) {
+ if (ret)
return ret;
- }
size = file_size(file);
if (size < 0) {
int ret = 0;
down(&cnt_init_sm_lock);
- if (cnt_init_sm == 0) {
+ if (cnt_init_sm == 0)
ret = create_sys_map();
- }
++cnt_init_sm;
up(&cnt_init_sm_lock);
{
down(&cnt_init_sm_lock);
--cnt_init_sm;
- if (cnt_init_sm == 0) {
+ if (cnt_init_sm == 0)
free_sys_map();
- }
if (cnt_init_sm < 0) {
KSYMS_ERR("cnt_init_sm=%d", cnt_init_sm);
struct sys_map_item *smi;
list_for_each_entry(smi, &smi_list, list) {
- if (strcmp(name, smi->name) == 0) {
+ if (strcmp(name, smi->name) == 0)
return smi->addr;
- }
}
return 0;
}
ret = file_check();
- if (ret) {
+ if (ret)
return -EINVAL;
- }
- // TODO: calling func 'swap_get_ksyms' in module used func 'swap_ksyms'
+ /* TODO: calling func 'swap_get_ksyms' in
+ * module used func 'swap_ksyms' */
swap_get_ksyms();
return 0;
{
down(&cnt_init_sm_lock);
- if (cnt_init_sm > 0) {
+ if (cnt_init_sm > 0)
free_sys_map();
- }
up(&cnt_init_sm_lock);
}
};
-static struct dentry *swap_dir = NULL;
+static struct dentry *swap_dir;
/**
* @brief Get debugfs dir.
static LIST_HEAD(init_list);
static DEFINE_MUTEX(inst_mutex);
-static unsigned init_flag = 0;
+static unsigned init_flag;
static int do_once(void)
{
ret = swap_cpu_down(cpu, 0);
if (ret == 0)
cpumask_set_cpu(cpu, mask);
- printk("===> SWAP CPU[%d] down(%d)\n", cpu, ret);
+ printk(KERN_INFO "===> SWAP CPU[%d] down(%d)\n", cpu, ret);
}
WARN_ON(num_online_cpus() > 1);
for_each_cpu(cpu, mask) {
ret = swap_cpu_up(cpu, 0);
- printk("===> SWAP CPU[%d] up(%d)\n", cpu, ret);
+ printk(KERN_INFO "===> SWAP CPU[%d] up(%d)\n", cpu, ret);
}
out:
return 0;
not_found:
- printk("ERROR: symbol %s(...) not found\n", sym);
+ printk(KERN_INFO "ERROR: symbol %s(...) not found\n", sym);
return -ESRCH;
}
* @brief SIZE_FEATURE_LIST definition.
*/
enum {
- SIZE_FEATURE_LIST = sizeof(feature_list) / sizeof(struct feature_item *),
+ SIZE_FEATURE_LIST =
+ sizeof(feature_list) / sizeof(struct feature_item *),
};
-static u64 feature_inst = 0;
-static u64 feature_mask = 0;
+static u64 feature_inst;
+static u64 feature_mask;
/**
* @brief Inits features list.
{
int i;
for (i = 0; i < SIZE_FEATURE_LIST; ++i) {
- printk("### f init_feature_mask[%2d]=%p\n", i, feature_list[i]);
+ printk(KERN_INFO "### f init_feature_mask[%2d]=%p\n", i,
+ feature_list[i]);
if (feature_list[i] != NULL) {
feature_mask |= ((u64)1) << i;
- printk("### f name=%s\n", feature_list[i]->name);
+ printk(KERN_INFO "### f name=%s\n",
+ feature_list[i]->name);
}
}
if (size) {
mb->begin = vmalloc(size);
if (mb->begin == NULL) {
- printk("Cannot alloc memory!\n");
+ printk(KERN_INFO "Cannot alloc memory!\n");
return -ENOMEM;
}
reset_discarded();
us_inst = create_us_inst_data(mb);
- if (us_inst == NULL) {
+ if (us_inst == NULL)
return -EINVAL;
- }
if (!is_end_mb(mb)) {
print_err("to long message, remained=%u", remained_mb(mb));
ret = mod_us_inst(us_inst, MT_ADD);
if (ret) {
- printk("Cannot mod us inst, ret = %d\n", ret);
+ printk(KERN_INFO "Cannot mod us inst, ret = %d\n", ret);
ret = -EINVAL;
goto free_us_inst;
}
conf.use_features1 = 0;
ret = set_config(&conf);
if (ret)
- printk("Cannot set config, ret = %d\n", ret);
+ printk(KERN_INFO "Cannot set config, ret = %d\n", ret);
discarded = get_discarded_count();
- printk("discarded messages: %d\n", discarded);
+ printk(KERN_INFO "discarded messages: %d\n", discarded);
reset_discarded();
return ret;
struct us_inst_data *us_inst;
us_inst = create_us_inst_data(mb);
- if (us_inst == NULL) {
+ if (us_inst == NULL)
return -EINVAL;
- }
if (!is_end_mb(mb)) {
print_err("to long message, remained=%u", remained_mb(mb));
struct us_inst_data *us_inst;
us_inst = create_us_inst_data(mb);
- if (us_inst == NULL) {
+ if (us_inst == NULL)
return -EINVAL;
- }
if (!is_end_mb(mb)) {
print_err("to long message, remained=%u", remained_mb(mb));
#include "parser_defs.h"
-static int str_to_u32(const char* str, u32 *val)
+static int str_to_u32(const char *str, u32 *val)
{
u32 result;
- if(!str || !*str)
+ if (!str || !*str)
return -EINVAL;
for (result = 0 ; *str; ++str) {
- if (*str < '0' || *str> '9')
+ if (*str < '0' || *str > '9')
return -EINVAL;
result = result * 10 + (*str - '0');
}
li = kmalloc(sizeof(*li), GFP_KERNEL);
- if (li == NULL)
if (li == NULL) {
print_err("out of memory\n");
goto free_path;
}
li->func = kmalloc(sizeof(struct func_inst_data *) * cnt, GFP_KERNEL);
- if (li->func == NULL)
if (li->func == NULL) {
- print_err("out of memory\n");
- goto free_li;
- }
+ print_err("out of memory\n");
+ goto free_li;
+ }
for (i = 0; i < cnt; ++i) {
print_parse_debug("func #%d:\n", i + 1);
}
for (i = 0; i < cnt; ++i) {
- print_parse_debug("app #%d:\n",i+1);
+ print_parse_debug("app #%d:\n", i + 1);
ai = create_app_inst_data(mb);
if (ai == NULL)
goto free_app_inst;
/** Prints debug message. */
#define print_debug(msg, args...) \
- printk(KERN_DEBUG "SWAP_PARSER DEBUG : " msg, ##args)
+ printk(KERN_DEBUG "SWAP_PARSER DEBUG : " msg, ##args)
/** Prints info message. */
#define print_msg(msg, args...) \
- printk(KERN_INFO "SWAP_PARSER : " msg, ##args)
+ printk(KERN_INFO "SWAP_PARSER : " msg, ##args)
/** Prints warning message. */
#define print_warn(msg, args...) \
- printk(KERN_WARNING "SWAP_PARSER WARNING : " msg, ##args)
+ printk(KERN_WARNING "SWAP_PARSER WARNING : " msg, ##args)
/** Prints error message. */
#define print_err(msg, args...) \
- printk(KERN_ERR "SWAP_PARSER ERROR : " msg, ##args)
+ printk(KERN_ERR "SWAP_PARSER ERROR : " msg, ##args)
/** Prints critical error message. */
#define print_crit(msg, args...) \
- printk(KERN_CRIT "SWAP_PARSER CRITICAL : " msg, ##args)
+ printk(KERN_CRIT "SWAP_PARSER CRITICAL : " msg, ##args)
/* debug parse */
#ifdef PARSE_DEBUG
#define print_parse_debug(msg, args...) \
- printk(KERN_DEBUG "SWAP_PARSER DEBUG : " msg, ##args)
+ printk(KERN_DEBUG "SWAP_PARSER DEBUG : " msg, ##args)
#else
#define print_parse_debug(msg, args...) \
- do {} while (0)
+ do {} while (0)
#endif /* PARSE_DEBUG */
#endif /* __SWAP_DRIVER_DEVICE_DEFS_H__ */
struct basic_msg_fmt {
u32 msg_id; /**< Message ID. */
u32 len; /**< Message length. */
-} __attribute__((packed));
+} __packed;
static int msg_handler(void __user *msg)
{
struct basic_msg_fmt bmf;
enum { size_max = 128 * 1024 * 1024 };
- ret = copy_from_user(&bmf, (void*)msg, sizeof(bmf));
+ ret = copy_from_user(&bmf, (void *)msg, sizeof(bmf));
if (ret)
return ret;
size = bmf.len;
if (size >= size_max) {
- printk("%s: too large message, size=%u\n", __func__, size);
+ printk(KERN_INFO "%s: too large message, size=%u\n",
+ __func__, size);
return -ENOMEM;
}
payload = msg + sizeof(bmf);
if (size) {
- ret = copy_from_user(mb.begin, (void*)payload, size);
+ ret = copy_from_user(mb.begin, (void *)payload, size);
if (ret)
goto uninit;
}
struct nameidata nd;
if (path_lookup(path, LOOKUP_FOLLOW, &nd) != 0) {
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
- printk("failed to lookup dentry for path %s!\n", path);
+ printk(KERN_INFO "failed to lookup dentry for path %s!\n",
+ path);
return NULL;
}
ret = pf_unregister_probe(pfg, dentry, func->addr);
break;
default:
- printk("ERROR: mod_type=0x%x\n", mt);
+ printk(KERN_INFO "ERROR: mod_type=0x%x\n", mt);
ret = -EINVAL;
}
dentry = dentry_by_path(lib->path);
if (dentry == NULL) {
- printk("Cannot get dentry by path %s\n", lib->path);
+ printk(KERN_INFO "Cannot get dentry by path %s\n", lib->path);
return -EINVAL;
}
for (i = 0; i < lib->cnt_func; ++i) {
ret = mod_func_inst(lib->func[i], pfg, dentry, mt);
if (ret) {
- printk("Cannot mod func inst, ret = %d\n", ret);
+ printk(KERN_INFO "Cannot mod func inst, ret = %d\n",
+ ret);
return ret;
}
}
return ret;
}
-static int get_pfg_by_app_info(struct app_info_data *app_info, struct pf_group **pfg)
+static int get_pfg_by_app_info(struct app_info_data *app_info,
+ struct pf_group **pfg)
{
struct dentry *dentry;
*pfg = get_pf_group_by_dentry(dentry, dentry);
break;
default:
- printk("ERROR: app_type=0x%x\n", app_info->app_type);
+ printk(KERN_INFO "ERROR: app_type=0x%x\n", app_info->app_type);
return -EINVAL;
}
ret = get_pfg_by_app_info(app_inst->app_info, &pfg);
if (ret) {
- printk("Cannot get pfg by app info, ret = %d\n", ret);
+ printk(KERN_INFO "Cannot get pfg by app info, ret = %d\n", ret);
return ret;
}
/* TODO: */
dentry = dentry_by_path(app_inst->app_info->exec_path);
if (dentry == NULL) {
- printk("Cannot find dentry by path %s\n",
+ printk(KERN_INFO "Cannot find dentry by path %s\n",
app_inst->app_info->exec_path);
return -EINVAL;
}
ret = mod_func_inst(app_inst->func[i], pfg, dentry, mt);
if (ret) {
- printk("Cannot mod func inst, ret = %d\n", ret);
+ printk(KERN_INFO "Cannot mod func inst, ret = %d\n",
+ ret);
return ret;
}
}
for (i = 0; i < app_inst->cnt_lib; ++i) {
ret = mod_lib_inst(app_inst->lib[i], pfg, mt);
if (ret) {
- printk("Cannot mod lib inst, ret = %d\n", ret);
+ printk(KERN_INFO "Cannot mod lib inst, ret = %d\n",
+ ret);
return ret;
}
}
for (i = 0; i < us_inst->cnt; ++i) {
ret = mod_us_app_inst(us_inst->app_inst[i], mt);
if (ret) {
- printk("Cannot mod us app inst, ret = %d\n", ret);
+ printk(KERN_INFO "Cannot mod us app inst, ret = %d\n",
+ ret);
return ret;
}
}
#include "sampler_timers.h"
-static u64 sampler_timer_quantum = 0;
+static u64 sampler_timer_quantum;
static DEFINE_PER_CPU(struct hrtimer, swap_hrtimer);
static int swap_hrtimer_running;
-static unsigned long sampler_timer_quantum = 0;
+static unsigned long sampler_timer_quantum;
static DEFINE_PER_CPU(struct timer_list, swap_timer);
static int swap_timer_running;
}
static DEFINE_MUTEX(mutex_run);
-static int sampler_run = 0;
+static int sampler_run;
/**
mutex_lock(&mutex_run);
if (sampler_run) {
- printk("sampler profiling is already run!\n");
+ printk(KERN_INFO "sampler profiling is already run!\n");
goto unlock;
}
mutex_lock(&mutex_run);
if (sampler_run == 0) {
- printk("energy profiling is not running!\n");
+ printk(KERN_INFO "energy profiling is not running!\n");
ret = -EINVAL;
goto unlock;
}
{
long offset = insn & 0x3ff;
offset -= insn & 0x400;
- return (insn_addr + 4 + offset * 2);
+ return insn_addr + 4 + offset * 2;
}
-static inline long branch_cond_t16_dest(kprobe_opcode_t insn, unsigned int insn_addr)
+static inline long branch_cond_t16_dest(kprobe_opcode_t insn,
+ unsigned int insn_addr)
{
long offset = insn & 0x7f;
offset -= insn & 0x80;
- return (insn_addr + 4 + offset * 2);
+ return insn_addr + 4 + offset * 2;
}
static inline long branch_t32_dest(kprobe_opcode_t insn, unsigned int insn_addr)
poff -= (insn & 0x400);
if (insn & (1 << 12))
- return ((insn_addr + 4 + (poff << 12) + offset * 4));
+ return insn_addr + 4 + (poff << 12) + offset * 4;
else
- return ((insn_addr + 4 + (poff << 12) + offset * 4) & ~3);
+ return (insn_addr + 4 + (poff << 12) + offset * 4) & ~3;
}
static inline long cbz_t16_dest(kprobe_opcode_t insn, unsigned int insn_addr)
THUMB2_INSN_MATCH(B1, insn) ||
THUMB2_INSN_MATCH(B2, insn) ||
THUMB2_INSN_MATCH(BXJ, insn) ||
- (THUMB2_INSN_MATCH(ADR, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
+ (THUMB2_INSN_MATCH(ADR, insn) &&
+ THUMB2_INSN_REG_RD(insn) == 15) ||
(THUMB2_INSN_MATCH(LDRW, insn) && THUMB2_INSN_REG_RT(insn) == 15) ||
- (THUMB2_INSN_MATCH(LDRW1, insn) && THUMB2_INSN_REG_RT(insn) == 15) ||
- (THUMB2_INSN_MATCH(LDRHW, insn) && THUMB2_INSN_REG_RT(insn) == 15) ||
- (THUMB2_INSN_MATCH(LDRHW1, insn) && THUMB2_INSN_REG_RT(insn) == 15) ||
- (THUMB2_INSN_MATCH(LDRWL, insn) && THUMB2_INSN_REG_RT(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LDRW1, insn) &&
+ THUMB2_INSN_REG_RT(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LDRHW, insn) &&
+ THUMB2_INSN_REG_RT(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LDRHW1, insn) &&
+ THUMB2_INSN_REG_RT(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LDRWL, insn) &&
+ THUMB2_INSN_REG_RT(insn) == 15) ||
THUMB2_INSN_MATCH(LDMIA, insn) ||
THUMB2_INSN_MATCH(LDMDB, insn) ||
(THUMB2_INSN_MATCH(DP, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
(THUMB2_INSN_MATCH(RSBW, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
(THUMB2_INSN_MATCH(RORW, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
(THUMB2_INSN_MATCH(ROR, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
- (THUMB2_INSN_MATCH(LSLW1, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
- (THUMB2_INSN_MATCH(LSLW2, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
- (THUMB2_INSN_MATCH(LSRW1, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
- (THUMB2_INSN_MATCH(LSRW2, insn) && THUMB2_INSN_REG_RD(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LSLW1, insn) &&
+ THUMB2_INSN_REG_RD(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LSLW2, insn) &&
+ THUMB2_INSN_REG_RD(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LSRW1, insn) &&
+ THUMB2_INSN_REG_RD(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LSRW2, insn) &&
+ THUMB2_INSN_REG_RD(insn) == 15) ||
/* skip PC, #-imm12 -> SP, #-imm8 and Tegra-hanging instructions */
- (THUMB2_INSN_MATCH(STRW1, insn) && THUMB2_INSN_REG_RN(insn) == 15) ||
- (THUMB2_INSN_MATCH(STRBW1, insn) && THUMB2_INSN_REG_RN(insn) == 15) ||
- (THUMB2_INSN_MATCH(STRHW1, insn) && THUMB2_INSN_REG_RN(insn) == 15) ||
+ (THUMB2_INSN_MATCH(STRW1, insn) &&
+ THUMB2_INSN_REG_RN(insn) == 15) ||
+ (THUMB2_INSN_MATCH(STRBW1, insn) &&
+ THUMB2_INSN_REG_RN(insn) == 15) ||
+ (THUMB2_INSN_MATCH(STRHW1, insn) &&
+ THUMB2_INSN_REG_RN(insn) == 15) ||
(THUMB2_INSN_MATCH(STRW, insn) && THUMB2_INSN_REG_RN(insn) == 15) ||
- (THUMB2_INSN_MATCH(STRHW, insn) && THUMB2_INSN_REG_RN(insn) == 15) ||
+ (THUMB2_INSN_MATCH(STRHW, insn) &&
+ THUMB2_INSN_REG_RN(insn) == 15) ||
(THUMB2_INSN_MATCH(LDRW, insn) && THUMB2_INSN_REG_RN(insn) == 15) ||
- (THUMB2_INSN_MATCH(LDRBW, insn) && THUMB2_INSN_REG_RN(insn) == 15) ||
- (THUMB2_INSN_MATCH(LDRHW, insn) && THUMB2_INSN_REG_RN(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LDRBW, insn) &&
+ THUMB2_INSN_REG_RN(insn) == 15) ||
+ (THUMB2_INSN_MATCH(LDRHW, insn) &&
+ THUMB2_INSN_REG_RN(insn) == 15) ||
/* skip STRDx/LDRDx Rt, Rt2, [Rd, ...] */
- (THUMB2_INSN_MATCH(LDRD, insn) || THUMB2_INSN_MATCH(LDRD1, insn) || THUMB2_INSN_MATCH(STRD, insn))) {
+ (THUMB2_INSN_MATCH(LDRD, insn) || THUMB2_INSN_MATCH(LDRD1, insn) ||
+ THUMB2_INSN_MATCH(STRD, insn))) {
ret = -EFAULT;
}
return ret;
}
-static int prep_pc_dep_insn_execbuf_thumb(kprobe_opcode_t * insns, kprobe_opcode_t insn, int uregs)
+static int prep_pc_dep_insn_execbuf_thumb(kprobe_opcode_t *insns,
+ kprobe_opcode_t insn, int uregs)
{
unsigned char mreg = 0;
unsigned char reg = 0;
reg = ((insn & 0xffff) & uregs) >> 8;
} else {
if (THUMB_INSN_MATCH(MOV3, insn)) {
- if (((((unsigned char) insn) & 0xff) >> 3) == 15) {
+ if (((((unsigned char)insn) & 0xff) >> 3) == 15)
reg = (insn & 0xffff) & uregs;
- } else {
+ else
return 0;
- }
} else {
if (THUMB2_INSN_MATCH(ADR, insn)) {
reg = ((insn >> 16) & uregs) >> 8;
- if (reg == 15) {
+ if (reg == 15)
return 0;
- }
} else {
- if (THUMB2_INSN_MATCH(LDRW, insn) || THUMB2_INSN_MATCH(LDRW1, insn) ||
- THUMB2_INSN_MATCH(LDRHW, insn) || THUMB2_INSN_MATCH(LDRHW1, insn) ||
+ if (THUMB2_INSN_MATCH(LDRW, insn) ||
+ THUMB2_INSN_MATCH(LDRW1, insn) ||
+ THUMB2_INSN_MATCH(LDRHW, insn) ||
+ THUMB2_INSN_MATCH(LDRHW1, insn) ||
THUMB2_INSN_MATCH(LDRWL, insn)) {
reg = ((insn >> 16) & uregs) >> 12;
- if (reg == 15) {
+ if (reg == 15)
return 0;
- }
} else {
- // LDRB.W PC, [PC, #immed] => PLD [PC, #immed], so Rt == PC is skipped
- if (THUMB2_INSN_MATCH(LDRBW, insn) || THUMB2_INSN_MATCH(LDRBW1, insn) ||
+ /* LDRB.W PC, [PC, #immed] =>
+ * PLD [PC, #immed],
+ * so Rt == PC is skipped */
+ if (THUMB2_INSN_MATCH(LDRBW, insn) ||
+ THUMB2_INSN_MATCH(LDRBW1, insn) ||
THUMB2_INSN_MATCH(LDREX, insn)) {
reg = ((insn >> 16) & uregs) >> 12;
} else {
if (THUMB2_INSN_MATCH(DP, insn)) {
reg = ((insn >> 16) & uregs) >> 12;
- if (reg == 15) {
+ if (reg == 15)
return 0;
- }
} else {
if (THUMB2_INSN_MATCH(RSBW, insn)) {
reg = ((insn >> 12) & uregs) >> 8;
- if (reg == 15){
+ if (reg == 15)
return 0;
- }
} else {
if (THUMB2_INSN_MATCH(RORW, insn)) {
reg = ((insn >> 12) & uregs) >> 8;
- if (reg == 15) {
+ if (reg == 15)
return 0;
- }
} else {
if (THUMB2_INSN_MATCH(ROR, insn) || THUMB2_INSN_MATCH(LSLW1, insn) ||
THUMB2_INSN_MATCH(LSLW2, insn) || THUMB2_INSN_MATCH(LSRW1, insn) ||
THUMB2_INSN_MATCH(LSRW2, insn)) {
reg = ((insn >> 12) & uregs) >> 8;
- if (reg == 15) {
+ if (reg == 15)
return 0;
- }
} else {
if (THUMB2_INSN_MATCH(TEQ1, insn) || THUMB2_INSN_MATCH(TST1, insn)) {
reg = 15;
} else {
- if (THUMB2_INSN_MATCH(TEQ2, insn) || THUMB2_INSN_MATCH(TST2, insn)) {
+ if (THUMB2_INSN_MATCH(TEQ2, insn) || THUMB2_INSN_MATCH(TST2, insn))
reg = THUMB2_INSN_REG_RM(insn);
- }
}
}
}
}
}
- if ((THUMB2_INSN_MATCH(STRW, insn) || THUMB2_INSN_MATCH(STRBW, insn) ||
- THUMB2_INSN_MATCH(STRD, insn) || THUMB2_INSN_MATCH(STRHT, insn) ||
- THUMB2_INSN_MATCH(STRT, insn) || THUMB2_INSN_MATCH(STRHW1, insn) ||
- THUMB2_INSN_MATCH(STRHW, insn)) && THUMB2_INSN_REG_RT(insn) == 15) {
+ if ((THUMB2_INSN_MATCH(STRW, insn) ||
+ THUMB2_INSN_MATCH(STRBW, insn) ||
+ THUMB2_INSN_MATCH(STRD, insn) ||
+ THUMB2_INSN_MATCH(STRHT, insn) ||
+ THUMB2_INSN_MATCH(STRT, insn) ||
+ THUMB2_INSN_MATCH(STRHW1, insn) ||
+ THUMB2_INSN_MATCH(STRHW, insn)) &&
+ THUMB2_INSN_REG_RT(insn) == 15) {
reg = THUMB2_INSN_REG_RT(insn);
}
if (reg == 6 || reg == 7) {
- *((unsigned short*)insns + 0) = (*((unsigned short*)insns + 0) & 0x00ff) | ((1 << mreg) | (1 << (mreg + 1)));
- *((unsigned short*)insns + 1) = (*((unsigned short*)insns + 1) & 0xf8ff) | (mreg << 8);
- *((unsigned short*)insns + 2) = (*((unsigned short*)insns + 2) & 0xfff8) | (mreg + 1);
- *((unsigned short*)insns + 3) = (*((unsigned short*)insns + 3) & 0xffc7) | (mreg << 3);
- *((unsigned short*)insns + 7) = (*((unsigned short*)insns + 7) & 0xf8ff) | (mreg << 8);
- *((unsigned short*)insns + 8) = (*((unsigned short*)insns + 8) & 0xffc7) | (mreg << 3);
- *((unsigned short*)insns + 9) = (*((unsigned short*)insns + 9) & 0xffc7) | ((mreg + 1) << 3);
- *((unsigned short*)insns + 10) = (*((unsigned short*)insns + 10) & 0x00ff) | (( 1 << mreg) | (1 << (mreg + 1)));
+ *((unsigned short *)insns + 0) =
+ (*((unsigned short *)insns + 0) & 0x00ff) |
+ ((1 << mreg) | (1 << (mreg + 1)));
+ *((unsigned short *)insns + 1) =
+ (*((unsigned short *)insns + 1) & 0xf8ff) | (mreg << 8);
+ *((unsigned short *)insns + 2) =
+ (*((unsigned short *)insns + 2) & 0xfff8) | (mreg + 1);
+ *((unsigned short *)insns + 3) =
+ (*((unsigned short *)insns + 3) & 0xffc7) | (mreg << 3);
+ *((unsigned short *)insns + 7) =
+ (*((unsigned short *)insns + 7) & 0xf8ff) | (mreg << 8);
+ *((unsigned short *)insns + 8) =
+ (*((unsigned short *)insns + 8) & 0xffc7) | (mreg << 3);
+ *((unsigned short *)insns + 9) =
+ (*((unsigned short *)insns + 9) & 0xffc7) |
+ ((mreg + 1) << 3);
+ *((unsigned short *)insns + 10) =
+ (*((unsigned short *)insns + 10) & 0x00ff) |
+ ((1 << mreg) | (1 << (mreg + 1)));
}
if (THUMB_INSN_MATCH(APC, insn)) {
- // ADD Rd, PC, #immed_8*4 -> ADD Rd, SP, #immed_8*4
- *((unsigned short*)insns + 4) = ((insn & 0xffff) | 0x800); // ADD Rd, SP, #immed_8*4
+ /* ADD Rd, PC, #immed_8*4 -> ADD Rd, SP, #immed_8*4 */
+ *((unsigned short *)insns + 4) = ((insn & 0xffff) | 0x800);
} else {
if (THUMB_INSN_MATCH(LRO3, insn)) {
- // LDR Rd, [PC, #immed_8*4] -> LDR Rd, [SP, #immed_8*4]
- *((unsigned short*)insns + 4) = ((insn & 0xffff) + 0x5000); // LDR Rd, [SP, #immed_8*4]
+ /* LDR Rd, [PC, #immed_8*4] ->
+ * LDR Rd, [SP, #immed_8*4] */
+ *((unsigned short *)insns + 4) =
+ ((insn & 0xffff) + 0x5000);
} else {
if (THUMB_INSN_MATCH(MOV3, insn)) {
- // MOV Rd, PC -> MOV Rd, SP
- *((unsigned short*)insns + 4) = ((insn & 0xffff) ^ 0x10); // MOV Rd, SP
+ /* MOV Rd, PC -> MOV Rd, SP */
+ *((unsigned short *)insns + 4) =
+ ((insn & 0xffff) ^ 0x10);
} else {
if (THUMB2_INSN_MATCH(ADR, insn)) {
- // ADDW Rd, PC, #imm -> ADDW Rd, SP, #imm
- insns[2] = (insn & 0xfffffff0) | 0x0d; // ADDW Rd, SP, #imm
+ /* ADDW Rd,PC,#imm -> ADDW Rd,SP,#imm */
+ insns[2] = (insn & 0xfffffff0) | 0x0d;
} else {
- if (THUMB2_INSN_MATCH(LDRW, insn) || THUMB2_INSN_MATCH(LDRBW, insn) ||
+ if (THUMB2_INSN_MATCH(LDRW, insn) ||
+ THUMB2_INSN_MATCH(LDRBW, insn) ||
THUMB2_INSN_MATCH(LDRHW, insn)) {
- // LDR.W Rt, [PC, #-<imm_12>] -> LDR.W Rt, [SP, #-<imm_8>]
- // !!!!!!!!!!!!!!!!!!!!!!!!
- // !!! imm_12 vs. imm_8 !!!
- // !!!!!!!!!!!!!!!!!!!!!!!!
- insns[2] = (insn & 0xf0fffff0) | 0x0c00000d; // LDR.W Rt, [SP, #-<imm_8>]
+ /* LDR.W Rt, [PC, #-<imm_12>] ->
+ * LDR.W Rt, [SP, #-<imm_8>]
+ * !!!!!!!!!!!!!!!!!!!!!!!!
+ * !!! imm_12 vs. imm_8 !!!
+ * !!!!!!!!!!!!!!!!!!!!!!!! */
+ insns[2] = (insn & 0xf0fffff0) | 0x0c00000d;
} else {
- if (THUMB2_INSN_MATCH(LDRW1, insn) || THUMB2_INSN_MATCH(LDRBW1, insn) ||
- THUMB2_INSN_MATCH(LDRHW1, insn) || THUMB2_INSN_MATCH(LDRD, insn) ||
- THUMB2_INSN_MATCH(LDRD1, insn) || THUMB2_INSN_MATCH(LDREX, insn)) {
- // LDRx.W Rt, [PC, #+<imm_12>] -> LDRx.W Rt, [SP, #+<imm_12>] (+/-imm_8 for LDRD Rt, Rt2, [PC, #<imm_8>]
- insns[2] = (insn & 0xfffffff0) | 0xd; // LDRx.W Rt, [SP, #+<imm_12>]
+ if (THUMB2_INSN_MATCH(LDRW1, insn) ||
+ THUMB2_INSN_MATCH(LDRBW1, insn) ||
+ THUMB2_INSN_MATCH(LDRHW1, insn) ||
+ THUMB2_INSN_MATCH(LDRD, insn) ||
+ THUMB2_INSN_MATCH(LDRD1, insn) ||
+ THUMB2_INSN_MATCH(LDREX, insn)) {
+ /* LDRx.W Rt, [PC, #+<imm_12>] ->
+ * LDRx.W Rt, [SP, #+<imm_12>]
+ * (+/-imm_8 for LDRD Rt, Rt2, [PC, #<imm_8>]) */
+ insns[2] = (insn & 0xfffffff0) | 0xd;
} else {
if (THUMB2_INSN_MATCH(MUL, insn)) {
- insns[2] = (insn & 0xfff0ffff) | 0x000d0000; // MUL Rd, Rn, SP
+ insns[2] = (insn & 0xfff0ffff) | 0x000d0000; /* MUL Rd, Rn, SP */
} else {
if (THUMB2_INSN_MATCH(DP, insn)) {
- if (THUMB2_INSN_REG_RM(insn) == 15) {
- insns[2] = (insn & 0xfff0ffff) | 0x000d0000; // DP Rd, Rn, PC
- } else if (THUMB2_INSN_REG_RN(insn) == 15) {
- insns[2] = (insn & 0xfffffff0) | 0xd; // DP Rd, PC, Rm
- }
+ if (THUMB2_INSN_REG_RM(insn) == 15)
+ insns[2] = (insn & 0xfff0ffff) | 0x000d0000; /* DP Rd, Rn, PC */
+ else if (THUMB2_INSN_REG_RN(insn) == 15)
+ insns[2] = (insn & 0xfffffff0) | 0xd; /* DP Rd, PC, Rm */
} else {
if (THUMB2_INSN_MATCH(LDRWL, insn)) {
- // LDRx.W Rt, [PC, #<imm_12>] -> LDRx.W Rt, [SP, #+<imm_12>] (+/-imm_8 for LDRD Rt, Rt2, [PC, #<imm_8>]
- insns[2] = (insn & 0xfffffff0) | 0xd; // LDRx.W Rt, [SP, #+<imm_12>]
+ /* LDRx.W Rt, [PC, #<imm_12>] ->
+ * LDRx.W Rt, [SP, #+<imm_12>]
+ * (+/-imm_8 for LDRD Rt, Rt2, [PC, #<imm_8>]) */
+ insns[2] = (insn & 0xfffffff0) | 0xd;
} else {
if (THUMB2_INSN_MATCH(RSBW, insn)) {
- insns[2] = (insn & 0xfffffff0) | 0xd; // RSB{S}.W Rd, PC, #<const> -> RSB{S}.W Rd, SP, #<const>
+ insns[2] = (insn & 0xfffffff0) | 0xd; /* RSB{S}.W Rd, PC, #<const> -> RSB{S}.W Rd, SP, #<const> */
} else {
if (THUMB2_INSN_MATCH(RORW, insn) || THUMB2_INSN_MATCH(LSLW1, insn) || THUMB2_INSN_MATCH(LSRW1, insn)) {
- if ((THUMB2_INSN_REG_RM(insn) == 15) && (THUMB2_INSN_REG_RN(insn) == 15)) {
- insns[2] = (insn & 0xfffdfffd); // ROR.W Rd, PC, PC
- } else if (THUMB2_INSN_REG_RM(insn) == 15) {
- insns[2] = (insn & 0xfff0ffff) | 0xd0000; // ROR.W Rd, Rn, PC
- } else if (THUMB2_INSN_REG_RN(insn) == 15) {
- insns[2] = (insn & 0xfffffff0) | 0xd; // ROR.W Rd, PC, Rm
- }
+ if ((THUMB2_INSN_REG_RM(insn) == 15) && (THUMB2_INSN_REG_RN(insn) == 15))
+ insns[2] = (insn & 0xfffdfffd); /* ROR.W Rd, PC, PC */
+ else if (THUMB2_INSN_REG_RM(insn) == 15)
+ insns[2] = (insn & 0xfff0ffff) | 0xd0000; /* ROR.W Rd, Rn, PC */
+ else if (THUMB2_INSN_REG_RN(insn) == 15)
+ insns[2] = (insn & 0xfffffff0) | 0xd; /* ROR.W Rd, PC, Rm */
} else {
- if (THUMB2_INSN_MATCH(ROR, insn) || THUMB2_INSN_MATCH(LSLW2, insn) || THUMB2_INSN_MATCH(LSRW2, insn)) {
- insns[2] = (insn & 0xfff0ffff) | 0xd0000; // ROR{S} Rd, PC, #<const> -> ROR{S} Rd, SP, #<const>
- }
+ if (THUMB2_INSN_MATCH(ROR, insn) || THUMB2_INSN_MATCH(LSLW2, insn) || THUMB2_INSN_MATCH(LSRW2, insn))
+ insns[2] = (insn & 0xfff0ffff) | 0xd0000; /* ROR{S} Rd, PC, #<const> -> ROR{S} Rd, SP, #<const> */
}
}
}
}
if (THUMB2_INSN_MATCH(STRW, insn) || THUMB2_INSN_MATCH(STRBW, insn)) {
- insns[2] = (insn & 0xfff0ffff) | 0x000d0000; // STRx.W Rt, [Rn, SP]
+ insns[2] = (insn & 0xfff0ffff) | 0x000d0000; /* STRx.W Rt, [Rn, SP] */
} else {
if (THUMB2_INSN_MATCH(STRD, insn) || THUMB2_INSN_MATCH(STRHT, insn) ||
THUMB2_INSN_MATCH(STRT, insn) || THUMB2_INSN_MATCH(STRHW1, insn)) {
- if (THUMB2_INSN_REG_RN(insn) == 15) {
- insns[2] = (insn & 0xfffffff0) | 0xd; // STRD/T/HT{.W} Rt, [SP, ...]
- } else {
+ if (THUMB2_INSN_REG_RN(insn) == 15)
+ insns[2] = (insn & 0xfffffff0) | 0xd; /* STRD/T/HT{.W} Rt, [SP, ...] */
+ else
insns[2] = insn;
- }
} else {
if (THUMB2_INSN_MATCH(STRHW, insn) && (THUMB2_INSN_REG_RN(insn) == 15)) {
- if (THUMB2_INSN_REG_RN(insn) == 15) {
- insns[2] = (insn & 0xf0fffff0) | 0x0c00000d; // STRH.W Rt, [SP, #-<imm_8>]
- } else {
+ if (THUMB2_INSN_REG_RN(insn) == 15)
+ insns[2] = (insn & 0xf0fffff0) | 0x0c00000d; /* STRH.W Rt, [SP, #-<imm_8>] */
+ else
insns[2] = insn;
- }
}
}
}
- // STRx PC, xxx
+ /* STRx PC, xxx */
if ((reg == 15) && (THUMB2_INSN_MATCH(STRW, insn) ||
THUMB2_INSN_MATCH(STRBW, insn) ||
THUMB2_INSN_MATCH(STRD, insn) ||
THUMB2_INSN_MATCH(STRHT, insn) ||
THUMB2_INSN_MATCH(STRT, insn) ||
THUMB2_INSN_MATCH(STRHW1, insn) ||
- THUMB2_INSN_MATCH(STRHW, insn) )) {
+ THUMB2_INSN_MATCH(STRHW, insn))) {
insns[2] = (insns[2] & 0x0fffffff) | 0xd0000000;
}
if (THUMB2_INSN_MATCH(TEQ1, insn) || THUMB2_INSN_MATCH(TST1, insn)) {
- insns[2] = (insn & 0xfffffff0) | 0xd; // TEQ SP, #<const>
+ insns[2] = (insn & 0xfffffff0) | 0xd; /* TEQ SP, #<const> */
} else {
- if (THUMB2_INSN_MATCH(TEQ2, insn) || THUMB2_INSN_MATCH(TST2, insn)) {
- if ((THUMB2_INSN_REG_RN(insn) == 15) && (THUMB2_INSN_REG_RM(insn) == 15)) {
- insns[2] = (insn & 0xfffdfffd); // TEQ/TST PC, PC
- } else if (THUMB2_INSN_REG_RM(insn) == 15) {
- insns[2] = (insn & 0xfff0ffff) | 0xd0000; // TEQ/TST Rn, PC
- } else if (THUMB2_INSN_REG_RN(insn) == 15) {
- insns[2] = (insn & 0xfffffff0) | 0xd; // TEQ/TST PC, Rm
- }
+ if (THUMB2_INSN_MATCH(TEQ2, insn) ||
+ THUMB2_INSN_MATCH(TST2, insn)) {
+ if ((THUMB2_INSN_REG_RN(insn) == 15) &&
+ (THUMB2_INSN_REG_RM(insn) == 15))
+ insns[2] = (insn & 0xfffdfffd); /* TEQ/TST PC, PC */
+ else if (THUMB2_INSN_REG_RM(insn) == 15)
+ insns[2] = (insn & 0xfff0ffff) | 0xd0000; /* TEQ/TST Rn, PC */
+ else if (THUMB2_INSN_REG_RN(insn) == 15)
+ insns[2] = (insn & 0xfffffff0) | 0xd; /* TEQ/TST PC, Rm */
}
}
p->safe_thumb = 1;
if (vaddr & 0x01) {
- printk("Error in %s at %d: attempt to register kprobe at an unaligned address\n", __FILE__, __LINE__);
+ printk(KERN_INFO "Error in %s at %d: attempt to register "
+ "kprobe at an unaligned address\n", __FILE__, __LINE__);
return -EINVAL;
}
- if (!arch_check_insn_thumb(insn)) {
+ if (!arch_check_insn_thumb(insn))
p->safe_thumb = 0;
- }
uregs = 0;
pc_dep = 0;
if (THUMB_INSN_MATCH(APC, insn) || THUMB_INSN_MATCH(LRO3, insn)) {
uregs = 0x0700; /* 8-10 */
pc_dep = 1;
- } else if (THUMB_INSN_MATCH(MOV3, insn) && (((((unsigned char)insn) & 0xff) >> 3) == 15)) {
+ } else if (THUMB_INSN_MATCH(MOV3, insn) &&
+ (((((unsigned char)insn) & 0xff) >> 3) == 15)) {
/* MOV Rd, PC */
uregs = 0x07;
pc_dep = 1;
} else if THUMB2_INSN_MATCH(ADR, insn) {
uregs = 0x0f00; /* Rd 8-11 */
pc_dep = 1;
- } else if (((THUMB2_INSN_MATCH(LDRW, insn) || THUMB2_INSN_MATCH(LDRW1, insn) ||
- THUMB2_INSN_MATCH(LDRBW, insn) || THUMB2_INSN_MATCH(LDRBW1, insn) ||
- THUMB2_INSN_MATCH(LDRHW, insn) || THUMB2_INSN_MATCH(LDRHW1, insn) ||
- THUMB2_INSN_MATCH(LDRWL, insn)) && THUMB2_INSN_REG_RN(insn) == 15) ||
+ } else if (((THUMB2_INSN_MATCH(LDRW, insn) ||
+ THUMB2_INSN_MATCH(LDRW1, insn) ||
+ THUMB2_INSN_MATCH(LDRBW, insn) ||
+ THUMB2_INSN_MATCH(LDRBW1, insn) ||
+ THUMB2_INSN_MATCH(LDRHW, insn) ||
+ THUMB2_INSN_MATCH(LDRHW1, insn) ||
+ THUMB2_INSN_MATCH(LDRWL, insn)) &&
+ THUMB2_INSN_REG_RN(insn) == 15) ||
THUMB2_INSN_MATCH(LDREX, insn) ||
- ((THUMB2_INSN_MATCH(STRW, insn) || THUMB2_INSN_MATCH(STRBW, insn) ||
- THUMB2_INSN_MATCH(STRHW, insn) || THUMB2_INSN_MATCH(STRHW1, insn)) &&
- (THUMB2_INSN_REG_RN(insn) == 15 || THUMB2_INSN_REG_RT(insn) == 15)) ||
- ((THUMB2_INSN_MATCH(STRT, insn) || THUMB2_INSN_MATCH(STRHT, insn)) &&
- (THUMB2_INSN_REG_RN(insn) == 15 || THUMB2_INSN_REG_RT(insn) == 15))) {
+ ((THUMB2_INSN_MATCH(STRW, insn) ||
+ THUMB2_INSN_MATCH(STRBW, insn) ||
+ THUMB2_INSN_MATCH(STRHW, insn) ||
+ THUMB2_INSN_MATCH(STRHW1, insn)) &&
+ (THUMB2_INSN_REG_RN(insn) == 15 ||
+ THUMB2_INSN_REG_RT(insn) == 15)) ||
+ ((THUMB2_INSN_MATCH(STRT, insn) ||
+ THUMB2_INSN_MATCH(STRHT, insn)) &&
+ (THUMB2_INSN_REG_RN(insn) == 15 ||
+ THUMB2_INSN_REG_RT(insn) == 15))) {
uregs = 0xf000; /* Rt 12-15 */
pc_dep = 1;
- } else if ((THUMB2_INSN_MATCH(LDRD, insn) || THUMB2_INSN_MATCH(LDRD1, insn)) && (THUMB2_INSN_REG_RN(insn) == 15)) {
+ } else if ((THUMB2_INSN_MATCH(LDRD, insn) ||
+ THUMB2_INSN_MATCH(LDRD1, insn)) &&
+ (THUMB2_INSN_REG_RN(insn) == 15)) {
uregs = 0xff00; /* Rt 12-15, Rt2 8-11 */
pc_dep = 1;
- } else if (THUMB2_INSN_MATCH(MUL, insn) && THUMB2_INSN_REG_RM(insn) == 15) {
+ } else if (THUMB2_INSN_MATCH(MUL, insn) &&
+ THUMB2_INSN_REG_RM(insn) == 15) {
uregs = 0xf;
pc_dep = 1;
- } else if (THUMB2_INSN_MATCH(DP, insn) && (THUMB2_INSN_REG_RN(insn) == 15 || THUMB2_INSN_REG_RM(insn) == 15)) {
+ } else if (THUMB2_INSN_MATCH(DP, insn) &&
+ (THUMB2_INSN_REG_RN(insn) == 15 ||
+ THUMB2_INSN_REG_RM(insn) == 15)) {
uregs = 0xf000; /* Rd 12-15 */
pc_dep = 1;
- } else if (THUMB2_INSN_MATCH(STRD, insn) && ((THUMB2_INSN_REG_RN(insn) == 15) || (THUMB2_INSN_REG_RT(insn) == 15) || THUMB2_INSN_REG_RT2(insn) == 15)) {
+ } else if (THUMB2_INSN_MATCH(STRD, insn) &&
+ ((THUMB2_INSN_REG_RN(insn) == 15) ||
+ (THUMB2_INSN_REG_RT(insn) == 15) ||
+ THUMB2_INSN_REG_RT2(insn) == 15)) {
uregs = 0xff00; /* Rt 12-15, Rt2 8-11 */
pc_dep = 1;
- } else if (THUMB2_INSN_MATCH(RSBW, insn) && THUMB2_INSN_REG_RN(insn) == 15) {
+ } else if (THUMB2_INSN_MATCH(RSBW, insn) &&
+ THUMB2_INSN_REG_RN(insn) == 15) {
uregs = 0x0f00; /* Rd 8-11 */
pc_dep = 1;
- } else if (THUMB2_INSN_MATCH (RORW, insn) && (THUMB2_INSN_REG_RN(insn) == 15 || THUMB2_INSN_REG_RM(insn) == 15)) {
+ } else if (THUMB2_INSN_MATCH(RORW, insn) &&
+ (THUMB2_INSN_REG_RN(insn) == 15 ||
+ THUMB2_INSN_REG_RM(insn) == 15)) {
uregs = 0x0f00;
pc_dep = 1;
- } else if ((THUMB2_INSN_MATCH(ROR, insn) || THUMB2_INSN_MATCH(LSLW2, insn) || THUMB2_INSN_MATCH(LSRW2, insn)) && THUMB2_INSN_REG_RM(insn) == 15) {
+ } else if ((THUMB2_INSN_MATCH(ROR, insn) ||
+ THUMB2_INSN_MATCH(LSLW2, insn) ||
+ THUMB2_INSN_MATCH(LSRW2, insn)) &&
+ THUMB2_INSN_REG_RM(insn) == 15) {
uregs = 0x0f00; /* Rd 8-11 */
pc_dep = 1;
- } else if ((THUMB2_INSN_MATCH(LSLW1, insn) || THUMB2_INSN_MATCH(LSRW1, insn)) && (THUMB2_INSN_REG_RN(insn) == 15 || THUMB2_INSN_REG_RM(insn) == 15)) {
+ } else if ((THUMB2_INSN_MATCH(LSLW1, insn) ||
+ THUMB2_INSN_MATCH(LSRW1, insn)) &&
+ (THUMB2_INSN_REG_RN(insn) == 15 ||
+ THUMB2_INSN_REG_RM(insn) == 15)) {
uregs = 0x0f00; /* Rd 8-11 */
pc_dep = 1;
- } else if ((THUMB2_INSN_MATCH(TEQ1, insn) || THUMB2_INSN_MATCH(TST1, insn)) && THUMB2_INSN_REG_RN(insn) == 15) {
+ } else if ((THUMB2_INSN_MATCH(TEQ1, insn) ||
+ THUMB2_INSN_MATCH(TST1, insn)) &&
+ THUMB2_INSN_REG_RN(insn) == 15) {
uregs = 0xf0000; /* Rn 0-3 (16-19) */
pc_dep = 1;
- } else if ((THUMB2_INSN_MATCH(TEQ2, insn) || THUMB2_INSN_MATCH(TST2, insn)) &&
- (THUMB2_INSN_REG_RN(insn) == 15 || THUMB2_INSN_REG_RM(insn) == 15)) {
+ } else if ((THUMB2_INSN_MATCH(TEQ2, insn) ||
+ THUMB2_INSN_MATCH(TST2, insn)) &&
+ (THUMB2_INSN_REG_RN(insn) == 15 ||
+ THUMB2_INSN_REG_RM(insn) == 15)) {
uregs = 0xf0000; /* Rn 0-3 (16-19) */
pc_dep = 1;
}
if (unlikely(uregs && pc_dep)) {
memcpy(tramp, pc_dep_insn_execbuf_thumb, tramp_len);
if (prep_pc_dep_insn_execbuf_thumb(tramp, insn, uregs) != 0) {
- printk("Error in %s at %d: failed to prepare exec buffer for insn %lx!",
+ printk(KERN_INFO "Error in %s at %d: failed to "
+ "prepare exec buffer for insn %lx!",
__FILE__, __LINE__, insn);
p->safe_thumb = 1;
}
addr = vaddr + 4;
- *((unsigned short*)tramp + 13) = 0xdeff;
- *((unsigned short*)tramp + 14) = addr & 0x0000ffff;
- *((unsigned short*)tramp + 15) = addr >> 16;
+ *((unsigned short *)tramp + 13) = 0xdeff;
+ *((unsigned short *)tramp + 14) = addr & 0x0000ffff;
+ *((unsigned short *)tramp + 15) = addr >> 16;
if (!is_thumb2(insn)) {
addr = vaddr + 2;
- *((unsigned short*)tramp + 16) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 17) = addr >> 16;
+ *((unsigned short *)tramp + 16) =
+ (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 17) = addr >> 16;
} else {
addr = vaddr + 4;
- *((unsigned short*)tramp + 16) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 17) = addr >> 16;
+ *((unsigned short *)tramp + 16) =
+ (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 17) = addr >> 16;
}
} else {
memcpy(tramp, gen_insn_execbuf_thumb, tramp_len);
- *((unsigned short*)tramp + 13) = 0xdeff;
+ *((unsigned short *)tramp + 13) = 0xdeff;
if (!is_thumb2(insn)) {
addr = vaddr + 2;
- *((unsigned short*)tramp + 2) = insn;
- *((unsigned short*)tramp + 16) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 17) = addr >> 16;
+ *((unsigned short *)tramp + 2) = insn;
+ *((unsigned short *)tramp + 16) =
+ (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 17) = addr >> 16;
} else {
addr = vaddr + 4;
tramp[1] = insn;
- *((unsigned short*)tramp + 16) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 17) = addr >> 16;
+ *((unsigned short *)tramp + 16) =
+ (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 17) = addr >> 16;
}
}
if (THUMB_INSN_MATCH(B2, insn)) {
memcpy(tramp, b_off_insn_execbuf_thumb, tramp_len);
- *((unsigned short*)tramp + 13) = 0xdeff;
+ *((unsigned short *)tramp + 13) = 0xdeff;
addr = branch_t16_dest(insn, vaddr);
- *((unsigned short*)tramp + 14) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 15) = addr >> 16;
- *((unsigned short*)tramp + 16) = 0;
- *((unsigned short*)tramp + 17) = 0;
+ *((unsigned short *)tramp + 14) = (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 15) = addr >> 16;
+ *((unsigned short *)tramp + 16) = 0;
+ *((unsigned short *)tramp + 17) = 0;
} else if (THUMB_INSN_MATCH(B1, insn)) {
memcpy(tramp, b_cond_insn_execbuf_thumb, tramp_len);
- *((unsigned short*)tramp + 13) = 0xdeff;
- *((unsigned short*)tramp + 0) |= (insn & 0xf00);
+ *((unsigned short *)tramp + 13) = 0xdeff;
+ *((unsigned short *)tramp + 0) |= (insn & 0xf00);
addr = branch_cond_t16_dest(insn, vaddr);
- *((unsigned short*)tramp + 14) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 15) = addr >> 16;
+ *((unsigned short *)tramp + 14) = (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 15) = addr >> 16;
addr = vaddr + 2;
- *((unsigned short*)tramp + 16) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 17) = addr >> 16;
+ *((unsigned short *)tramp + 16) = (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 17) = addr >> 16;
} else if (THUMB_INSN_MATCH(BLX2, insn) ||
THUMB_INSN_MATCH(BX, insn)) {
memcpy(tramp, b_r_insn_execbuf_thumb, tramp_len);
- *((unsigned short*)tramp + 13) = 0xdeff;
- *((unsigned short*)tramp + 4) = insn;
+ *((unsigned short *)tramp + 13) = 0xdeff;
+ *((unsigned short *)tramp + 4) = insn;
addr = vaddr + 2;
- *((unsigned short*)tramp + 16) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 17) = addr >> 16;
+ *((unsigned short *)tramp + 16) = (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 17) = addr >> 16;
} else if (THUMB2_INSN_MATCH(BLX1, insn) ||
THUMB2_INSN_MATCH(BL, insn)) {
memcpy(tramp, blx_off_insn_execbuf_thumb, tramp_len);
- *((unsigned short*)tramp + 13) = 0xdeff;
+ *((unsigned short *)tramp + 13) = 0xdeff;
addr = branch_t32_dest(insn, vaddr);
- *((unsigned short*)tramp + 14) = (addr & 0x0000ffff);
- *((unsigned short*)tramp + 15) = addr >> 16;
+ *((unsigned short *)tramp + 14) = (addr & 0x0000ffff);
+ *((unsigned short *)tramp + 15) = addr >> 16;
addr = vaddr + 4;
- *((unsigned short*)tramp + 16) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 17) = addr >> 16;
+ *((unsigned short *)tramp + 16) = (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 17) = addr >> 16;
} else if (THUMB_INSN_MATCH(CBZ, insn)) {
memcpy(tramp, cbz_insn_execbuf_thumb, tramp_len);
- *((unsigned short*)tramp + 13) = 0xdeff;
+ *((unsigned short *)tramp + 13) = 0xdeff;
/* zero out original branch displacement (imm5 = 0; i = 0) */
- *((unsigned short*)tramp + 0) = insn & (~0x2f8);
+ *((unsigned short *)tramp + 0) = insn & (~0x2f8);
/* replace it with 8 bytes offset in execbuf (imm5 = 0b00010) */
- *((unsigned short*)tramp + 0) |= 0x20;
+ *((unsigned short *)tramp + 0) |= 0x20;
addr = cbz_t16_dest(insn, vaddr);
- *((unsigned short*)tramp + 14) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 15) = addr >> 16;
+ *((unsigned short *)tramp + 14) = (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 15) = addr >> 16;
addr = vaddr + 2;
- *((unsigned short*)tramp + 16) = (addr & 0x0000ffff) | 0x1;
- *((unsigned short*)tramp + 17) = addr >> 16;
+ *((unsigned short *)tramp + 16) = (addr & 0x0000ffff) | 0x1;
+ *((unsigned short *)tramp + 17) = addr >> 16;
}
return 0;
unsigned long insn;
if (vaddr & 0x01) {
- printk("Error in %s at %d: attempt to register uprobe "
- "at an unaligned address\n", __FILE__, __LINE__);
+ printk(KERN_INFO "Error in %s at %d: attempt "
+ "to register uprobe at an unaligned address\n",
+ __FILE__, __LINE__);
return -EINVAL;
}
arch_copy_trampoline_thumb_uprobe(up);
if ((p->safe_arm) && (p->safe_thumb)) {
- printk("Error in %s at %d: failed "
+ printk(KERN_INFO "Error in %s at %d: failed "
"arch_copy_trampoline_*_uprobe() (both) "
"[tgid=%u, addr=%lx, data=%lx]\n",
__FILE__, __LINE__, task->tgid, vaddr, insn);
up->atramp.utramp = swap_slot_alloc(up->sm);
if (up->atramp.utramp == NULL) {
- printk("Error: swap_slot_alloc failed (%08lx)\n", vaddr);
+ printk(KERN_INFO "Error: swap_slot_alloc failed (%08lx)\n",
+ vaddr);
return -ENOMEM;
}
/* Set flag of current mode */
ri->sp = (kprobe_opcode_t *)((long)ri->sp | !!thumb_mode(regs));
- if (thumb_mode(regs)) {
+ if (thumb_mode(regs))
regs->ARM_lr = (unsigned long)(ri->rp->up.kp.ainsn.insn) + 0x1b;
- } else {
- regs->ARM_lr = (unsigned long)(ri->rp->up.kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
- }
+ else
+ regs->ARM_lr = (unsigned long)(ri->rp->up.kp.ainsn.insn +
+ UPROBES_TRAMP_RET_BREAK_IDX);
}
/**
retval = read_proc_vm_atomic(task, (unsigned long)stack,
buf, sizeof(buf));
if (retval != sizeof(buf)) {
- printk("---> %s (%d/%d): failed to read stack from %08lx\n",
- task->comm, task->tgid, task->pid,
+ printk(KERN_INFO "---> %s (%d/%d): failed to read "
+ "stack from %08lx\n", task->comm, task->tgid, task->pid,
(unsigned long)stack);
retval = -EFAULT;
goto check_lr;
goto check_lr;
}
- printk("---> %s (%d/%d): trampoline found at "
+ printk(KERN_INFO "---> %s (%d/%d): trampoline found at "
"%08lx (%08lx /%+d) - %p\n",
task->comm, task->tgid, task->pid,
(unsigned long)found, (unsigned long)sp,
&ri->ret_addr,
sizeof(ri->ret_addr));
if (retval != sizeof(ri->ret_addr)) {
- printk("---> %s (%d/%d): failed to write value to %08lx",
+ printk(KERN_INFO "---> %s (%d/%d): "
+ "failed to write value to %08lx",
task->comm, task->tgid, task->pid, (unsigned long)found);
retval = -EFAULT;
} else {
check_lr: /* check lr anyway */
if (ra == (unsigned long)tramp) {
- printk("---> %s (%d/%d): trampoline found at "
+ printk(KERN_INFO "---> %s (%d/%d): trampoline found at "
"lr = %08lx - %p\n",
task->comm, task->tgid, task->pid,
ra, ri->rp->up.kp.addr);
swap_set_ret_addr(uregs, (unsigned long)ri->ret_addr);
retval = 0;
} else if (retval) {
- printk("---> %s (%d/%d): trampoline NOT found at "
+ printk(KERN_INFO "---> %s (%d/%d): trampoline NOT found at "
"sp = %08lx, lr = %08lx - %p\n",
task->comm, task->tgid, task->pid,
(unsigned long)sp, ra, ri->rp->up.kp.addr);
struct uprobe *up = container_of(p, struct uprobe, kp);
struct ujprobe *jp = container_of(up, struct ujprobe, up);
- kprobe_pre_entry_handler_t pre_entry = (kprobe_pre_entry_handler_t)jp->pre_entry;
+ kprobe_pre_entry_handler_t pre_entry =
+ (kprobe_pre_entry_handler_t)jp->pre_entry;
entry_point_t entry = (entry_point_t)jp->entry;
if (pre_entry) {
{
return thumb_mode(regs) ?
(unsigned long)(p->ainsn.insn) + 0x1b :
- (unsigned long)(p->ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
+ (unsigned long)(p->ainsn.insn +
+ UPROBES_TRAMP_RET_BREAK_IDX);
}
/**
if (thumb_mode(regs) && !is_thumb2(p->opcode)) {
u16 tmp = p->opcode >> 16;
write_proc_vm_atomic(current,
- (unsigned long)((u16*)p->addr + 1), &tmp, 2);
+ (unsigned long)((u16 *)p->addr + 1), &tmp, 2);
flush_insns(p->addr, 4);
}
}
tramp = up->atramp.tramp_thumb;
break;
default:
- printk("Error in %s at %d: we are in arm mode "
+ printk(KERN_INFO "Error in %s at %d: we are in arm mode "
"(!) and check instruction was fail "
"(%0lX instruction at %p address)!\n",
__FILE__, __LINE__, p->opcode, p->addr);
p = get_ukprobe_by_insn_slot(tramp_addr, tgid, regs);
if (p == NULL) {
- printk("no_uprobe: Not one of ours: let "
+ printk(KERN_INFO "no_uprobe: Not one of ours: let "
"kernel handle it %p\n", addr);
return 1;
}
struct uprobe *up = kp2up(p);
if (make_trampoline(up, regs)) {
- printk("no_uprobe live\n");
+ printk(KERN_INFO "no_uprobe live\n");
return 0;
}
add_uprobe_table(p);
}
- if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ if (!p->pre_handler || !p->pre_handler(p, regs))
prepare_singlestep(p, regs);
- }
}
return 0;
}
void arch_opcode_analysis_uretprobe(struct uretprobe *rp);
-void arch_prepare_uretprobe(struct uretprobe_instance *ri, struct pt_regs *regs);
+void arch_prepare_uretprobe(struct uretprobe_instance *ri,
+ struct pt_regs *regs);
int arch_disarm_urp_inst(struct uretprobe_instance *ri,
struct task_struct *task);
ptr = (u32 *)regs->ARM_sp + n - 4;
if (get_user(addr, ptr))
- printk("failed to dereference a pointer, ptr=%p\n", ptr);
+ printk(KERN_INFO "failed to dereference a pointer, ptr=%p\n",
+ ptr);
return addr;
}
* @brief Uprobe control block
*/
struct uprobe_ctlblk {
- unsigned long flags; /**< Flags */
- struct kprobe *p; /**< Pointer to the uprobe's kprobe */
+ unsigned long flags; /**< Flags */
+ struct kprobe *p; /**< Pointer to the uprobe's kprobe */
};
static unsigned long trampoline_addr(struct uprobe *up)
panic("failed to read memory %p!\n", p->addr);
/* TODO: this is a workaround */
if (tramp[0] == call_relative_opcode) {
- printk("cannot install probe: 1st instruction is call\n");
+ printk(KERN_INFO "cannot install probe: 1st instruction is call\n");
return -1;
}
{
struct uprobe *up = container_of(p, struct uprobe, kp);
struct ujprobe *jp = container_of(up, struct ujprobe, up);
- kprobe_pre_entry_handler_t pre_entry = (kprobe_pre_entry_handler_t)jp->pre_entry;
+ kprobe_pre_entry_handler_t pre_entry =
+ (kprobe_pre_entry_handler_t)jp->pre_entry;
entry_point_t entry = (entry_point_t)jp->entry;
unsigned long args[6];
/* FIXME some user space apps crash if we clean interrupt bit */
- //regs->EREG(flags) &= ~IF_MASK;
+ /* regs->EREG(flags) &= ~IF_MASK; */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
trace_hardirqs_off();
#endif
/* read first 6 args from stack */
- if (!read_proc_vm_atomic(current, regs->EREG(sp) + 4, args, sizeof(args)))
- panic("failed to read user space func arguments %lx!\n", regs->EREG(sp) + 4);
+ if (!read_proc_vm_atomic(current, regs->EREG(sp) + 4,
+ args, sizeof(args)))
+ panic("failed to read user space func arguments %lx!\n",
+ regs->EREG(sp) + 4);
if (pre_entry)
p->ss_addr[smp_processor_id()] = (kprobe_opcode_t *)
unsigned long ra = trampoline_addr(&ri->rp->up);
ri->sp = (kprobe_opcode_t *)regs->sp;
- if (!read_proc_vm_atomic(current, regs->EREG(sp), &(ri->ret_addr), sizeof(ri->ret_addr)))
- panic("failed to read user space func ra %lx!\n", regs->EREG(sp));
+ if (!read_proc_vm_atomic(current, regs->EREG(sp), &(ri->ret_addr),
+ sizeof(ri->ret_addr)))
+ panic("failed to read user space func ra %lx!\n",
+ regs->EREG(sp));
if (!write_proc_vm_atomic(current, regs->EREG(sp), &ra, sizeof(ra)))
- panic("failed to write user space func ra %lx!\n", regs->EREG(sp));
+ panic("failed to write user space func ra %lx!\n",
+ regs->EREG(sp));
}
/**
unsigned long tramp_addr = trampoline_addr(&ri->rp->up);
len = read_proc_vm_atomic(task, sp, &ret_addr, sizeof(ret_addr));
if (len != sizeof(ret_addr)) {
- printk("---> %s (%d/%d): failed to read stack from %08lx\n",
+ printk(KERN_INFO "---> %s (%d/%d): failed to read stack from %08lx\n",
task->comm, task->tgid, task->pid, sp);
return -EFAULT;
}
len = write_proc_vm_atomic(task, sp, &ri->ret_addr,
sizeof(ri->ret_addr));
if (len != sizeof(ri->ret_addr)) {
- printk("---> %s (%d/%d): failed to write "
+ printk(KERN_INFO "---> %s (%d/%d): failed to write "
"orig_ret_addr to %08lx",
task->comm, task->tgid, task->pid, sp);
return -EFAULT;
}
} else {
- printk("---> %s (%d/%d): trampoline NOT found at sp = %08lx\n",
+ printk(KERN_INFO "---> %s (%d/%d): trampoline NOT found at sp = %08lx\n",
task->comm, task->tgid, task->pid, sp);
return -ENOENT;
}
static void set_user_jmp_op(void *from, void *to)
{
- struct __arch_jmp_op
- {
+ struct __arch_jmp_op {
char op;
long raddr;
- } __attribute__ ((packed)) jop;
+ } __packed jop;
jop.raddr = (long)(to) - ((long)(from) + 5);
jop.op = RELATIVEJUMP_INSTRUCTION;
- if (!write_proc_vm_atomic(current, (unsigned long)from, &jop, sizeof(jop)))
+ if (!write_proc_vm_atomic(current, (unsigned long)from, &jop,
+ sizeof(jop)))
panic("failed to write jump opcode to user space %p!\n", from);
}
-static void resume_execution(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
+static void resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ unsigned long flags)
{
unsigned long *tos, tos_dword = 0;
unsigned long copy_eip = (unsigned long)p->ainsn.insn;
regs->EREG(flags) &= ~TF_MASK;
tos = (unsigned long *)&tos_dword;
- if (!read_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
- panic("failed to read dword from top of the user space stack %lx!\n", regs->EREG(sp));
+ if (!read_proc_vm_atomic(current, regs->EREG(sp), &tos_dword,
+ sizeof(tos_dword)))
+ panic("failed to read dword from top of the user space stack "
+ "%lx!\n", regs->EREG(sp));
- if (!read_proc_vm_atomic(current, (unsigned long)p->ainsn.insn, insns, 2 * sizeof(kprobe_opcode_t)))
- panic("failed to read first 2 opcodes of instruction copy from user space %p!\n", p->ainsn.insn);
+ if (!read_proc_vm_atomic(current, (unsigned long)p->ainsn.insn, insns,
+ 2 * sizeof(kprobe_opcode_t)))
+ panic("failed to read first 2 opcodes of instruction copy "
+ "from user space %p!\n", p->ainsn.insn);
switch (insns[0]) {
- case 0x9c: /* pushfl */
- *tos &= ~(TF_MASK | IF_MASK);
- *tos |= flags & (TF_MASK | IF_MASK);
- break;
- case 0xc2: /* iret/ret/lret */
- case 0xc3:
- case 0xca:
- case 0xcb:
- case 0xcf:
- case 0xea: /* jmp absolute -- eip is correct */
- /* eip is already adjusted, no more changes required */
- p->ainsn.boostable = 1;
- goto no_change;
- case 0xe8: /* call relative - Fix return addr */
- *tos = orig_eip + (*tos - copy_eip);
- break;
- case 0x9a: /* call absolute -- same as call absolute, indirect */
+ case 0x9c: /* pushfl */
+ *tos &= ~(TF_MASK | IF_MASK);
+ *tos |= flags & (TF_MASK | IF_MASK);
+ break;
+ case 0xc2: /* iret/ret/lret */
+ case 0xc3:
+ case 0xca:
+ case 0xcb:
+ case 0xcf:
+ case 0xea: /* jmp absolute -- eip is correct */
+ /* eip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
+ goto no_change;
+ case 0xe8: /* call relative - Fix return addr */
+ *tos = orig_eip + (*tos - copy_eip);
+ break;
+ case 0x9a: /* call absolute -- same as call absolute, indirect */
+ *tos = orig_eip + (*tos - copy_eip);
+
+ if (!write_proc_vm_atomic(current,
+ regs->EREG(sp),
+ &tos_dword,
+ sizeof(tos_dword)))
+ panic("failed to write dword to top of the"
+ " user space stack %lx!\n",
+ regs->EREG(sp));
+
+ goto no_change;
+ case 0xff:
+ if ((insns[1] & 0x30) == 0x10) {
+ /*
+ * call absolute, indirect
+ * Fix return addr; eip is correct.
+ * But this is not boostable
+ */
*tos = orig_eip + (*tos - copy_eip);
- if (!write_proc_vm_atomic(current, regs->EREG (sp), &tos_dword, sizeof(tos_dword)))
- panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG (sp));
+ if (!write_proc_vm_atomic(current, regs->EREG(sp),
+ &tos_dword,
+ sizeof(tos_dword)))
+ panic("failed to write dword to top of the "
+ "user space stack %lx!\n",
+ regs->EREG(sp));
goto no_change;
- case 0xff:
- if ((insns[1] & 0x30) == 0x10) {
- /*
- * call absolute, indirect
- * Fix return addr; eip is correct.
- * But this is not boostable
- */
- *tos = orig_eip + (*tos - copy_eip);
-
- if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
- panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG(sp));
-
- goto no_change;
- } else if (((insns[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((insns[1] & 0x31) == 0x21)) {
- /* jmp far, absolute indirect */
- /* eip is correct. And this is boostable */
- p->ainsn.boostable = 1;
- goto no_change;
- }
- case 0xf3:
- if (insns[1] == 0xc3)
- /* repz ret special handling: no more changes */
- goto no_change;
- break;
- default:
- break;
+ } else if (((insns[1] & 0x31) == 0x20) || /* jmp near, absolute
+ * indirect */
+ ((insns[1] & 0x31) == 0x21)) {
+ /* jmp far, absolute indirect */
+ /* eip is correct. And this is boostable */
+ p->ainsn.boostable = 1;
+ goto no_change;
+ }
+ case 0xf3:
+ if (insns[1] == 0xc3)
+ /* repz ret special handling: no more changes */
+ goto no_change;
+ break;
+ default:
+ break;
}
- if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
- panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG(sp));
+ if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword,
+ sizeof(tos_dword)))
+ panic("failed to write dword to top of the user space stack "
+ "%lx!\n", regs->EREG(sp));
if (p->ainsn.boostable == 0) {
- if ((regs->EREG(ip) > copy_eip) && (regs->EREG(ip) - copy_eip) + 5 < MAX_INSN_SIZE) {
+ if ((regs->EREG(ip) > copy_eip) && (regs->EREG(ip) - copy_eip) +
+ 5 < MAX_INSN_SIZE) {
/*
* These instructions can be executed directly if it
* jumps back to correct address.
*/
- set_user_jmp_op((void *) regs->EREG(ip), (void *)orig_eip + (regs->EREG(ip) - copy_eip));
+ set_user_jmp_op((void *) regs->EREG(ip),
+ (void *)orig_eip +
+ (regs->EREG(ip) - copy_eip));
p->ainsn.boostable = 1;
} else {
p->ainsn.boostable = -1;
tramp = swap_slot_alloc(up->sm);
if (tramp == 0) {
- printk("trampoline out of memory\n");
+ printk(KERN_INFO "trampoline out of memory\n");
return -ENOMEM;
}
p = get_ukprobe_by_insn_slot(tramp_addr, tgid, regs);
if (p == NULL) {
- printk("no_uprobe\n");
+ printk(KERN_INFO "no_uprobe\n");
return 0;
}
return 1;
}
-static int uprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
+static int uprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
switch (val) {
#ifdef CONFIG_KPROBES
- case DIE_INT3:
+ case DIE_INT3:
#else
- case DIE_TRAP:
+ case DIE_TRAP:
#endif
- if (uprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_DEBUG:
- if (post_uprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- default:
- break;
+ if (uprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_DEBUG:
+ if (post_uprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ default:
+ break;
}
return ret;
return 0;
}
-void arch_prepare_uretprobe(struct uretprobe_instance *ri, struct pt_regs *regs);
+void arch_prepare_uretprobe(struct uretprobe_instance *ri,
+ struct pt_regs *regs);
int arch_disarm_urp_inst(struct uretprobe_instance *ri,
struct task_struct *task);
unsigned long arch_get_trampoline_addr(struct kprobe *p, struct pt_regs *regs);
/* 1 - return address saved on top of the stack */
ptr = (u32 *)regs->sp + n + 1;
if (get_user(addr, ptr))
- printk("failed to dereference a pointer, ptr=%p\n", ptr);
+ printk(KERN_INFO "failed to dereference a pointer, ptr=%p\n",
+ ptr);
return addr;
}
struct kprobe *p;
DECLARE_NODE_PTR_FOR_HLIST(node);
- // print uprobe table
+ /* print uprobe table */
for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
head = &uprobe_insn_slot_table[i];
swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
- printk("####### find U tgid=%u, addr=%x\n",
+ printk(KERN_INFO "####### find U tgid=%u, addr=%x\n",
p->tgid, p->addr);
}
}
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->pre_handler) {
ret = kp->pre_handler(kp, regs);
- if (ret) {
+ if (ret)
return ret;
- }
}
}
return 0;
}
-static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
+static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
{
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->post_handler) {
+ if (kp->post_handler)
kp->post_handler(kp, regs, flags);
- }
}
}
-static int aggr_fault_uhandler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+static int aggr_fault_uhandler(struct kprobe *p,
+ struct pt_regs *regs,
+ int trapnr)
{
return 0;
}
static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
{
if (p->break_handler) {
- if (old_p->break_handler) {
+ if (old_p->break_handler)
return -EEXIST;
- }
list_add_tail_rcu(&p->list, &old_p->list);
old_p->break_handler = aggr_break_uhandler;
} else {
- list_add_rcu (&p->list, &old_p->list);
+ list_add_rcu(&p->list, &old_p->list);
}
- if (p->post_handler && !old_p->post_handler) {
+ if (p->post_handler && !old_p->post_handler)
old_p->post_handler = aggr_post_uhandler;
- }
return 0;
}
ap->pre_handler = aggr_pre_uhandler;
ap->fault_handler = aggr_fault_uhandler;
- if (p->post_handler) {
+ if (p->post_handler)
ap->post_handler = aggr_post_uhandler;
- }
- if (p->break_handler) {
+ if (p->break_handler)
ap->break_handler = aggr_break_uhandler;
- }
INIT_LIST_HEAD(&ap->list);
list_add_rcu(&p->list, &ap->list);
ret = add_new_uprobe(old_p, p);
} else {
struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
- if (!uap) {
+ if (!uap)
return -ENOMEM;
- }
uap->task = kp2up(p)->task;
ap = up2kp(uap);
void disarm_uprobe(struct kprobe *p, struct task_struct *task)
{
int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
- &p->opcode, sizeof(p->opcode));
+ &p->opcode, sizeof(p->opcode));
if (!ret) {
panic("disarm_uprobe: failed to write memory "
"tgid=%u, addr=%p!\n", task->tgid, p->addr);
static void init_uprobes_insn_slots(void)
{
int i;
- for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
+ for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
- }
}
static void init_uprobe_table(void)
{
int i;
- for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
+ for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
INIT_HLIST_HEAD(&uprobe_table[i]);
- }
}
static void init_uretprobe_inst_table(void)
{
int i;
- for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
- INIT_HLIST_HEAD (&uretprobe_inst_table[i]);
- }
+ for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
+ INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
}
/**
head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
- if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
+ if (p->addr == addr && kp2up(p)->task->tgid == tgid)
return p;
- }
}
return NULL;
void add_uprobe_table(struct kprobe *p)
{
INIT_HLIST_NODE(&p->is_hlist);
- hlist_add_head_rcu(&p->is_hlist, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
+ hlist_add_head_rcu(&p->is_hlist,
+ &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn,
+ UPROBE_HASH_BITS)]);
}
/**
* @return Pointer to the kprobe on success,\n
* NULL otherwise.
*/
-struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
+struct kprobe *get_ukprobe_by_insn_slot(void *addr,
+ pid_t tgid,
+ struct pt_regs *regs)
{
struct hlist_head *head;
struct kprobe *p;
/* TODO: test - two processes invokes instrumented function */
head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
- if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
+ if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid)
return p;
- }
}
return NULL;
static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
{
- return &uretprobe_inst_table[hash_ptr (hash_key, UPROBE_HASH_BITS)];
+ return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
}
/* Called with uretprobe_lock held */
struct uretprobe_instance *inst;
int i;
-#if 1//def CONFIG_PREEMPT
+#if 1 /* def CONFIG_PREEMPT */
rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
#else
rp->maxacpptive += NR_CPUS;
}
if (!alloc_nodes_uretprobe(rp)) {
- swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node,
+ &rp->free_instances, uflist) {
return ri;
}
}
return NULL;
}
-// ===================================================================
+/* =================================================================== */
/**
* @brief Registers uprobe.
struct kprobe *p, *old_p;
p = &up->kp;
- if (!p->addr) {
+ if (!p->addr)
return -EINVAL;
- }
DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);
-// thumb address = address-1;
+/* thumb address = address-1; */
#if defined(CONFIG_ARM)
- // TODO: must be corrected in 'bundle'
- if ((unsigned long) p->addr & 0x01) {
- p->addr = (kprobe_opcode_t *)((unsigned long)p->addr & 0xfffffffe);
- }
+ /* TODO: must be corrected in 'bundle' */
+ if ((unsigned long) p->addr & 0x01)
+ p->addr = (kprobe_opcode_t *)((unsigned long)p->addr &
+ 0xfffffffe);
#endif
p->ainsn.insn = NULL;
p->count = 0;
#endif
- // get the first item
+ /* get the first item */
old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
if (old_p) {
#ifdef CONFIG_ARM
goto out;
}
- DBPRINTF ("before out ret = 0x%x\n", ret);
+ DBPRINTF("before out ret = 0x%x\n", ret);
- // TODO: add uprobe (must be in function)
+ /* TODO: add uprobe (must be in function) */
INIT_HLIST_NODE(&p->hlist);
- hlist_add_head_rcu(&p->hlist, &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
+ hlist_add_head_rcu(&p->hlist,
+ &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
arm_uprobe(up);
out:
p = &up->kp;
old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
- if (unlikely(!old_p)) {
+ if (unlikely(!old_p))
return;
- }
if (p != old_p) {
list_for_each_entry_rcu(list_p, &old_p->list, list) {
kfree(old_p);
}
- if (!in_atomic()) {
+ if (!in_atomic())
synchronize_sched();
- }
remove_uprobe(up);
} else {
- if (p->break_handler) {
+ if (p->break_handler)
old_p->break_handler = NULL;
- }
if (p->post_handler) {
- list_for_each_entry_rcu (list_p, &old_p->list, list) {
+ list_for_each_entry_rcu(list_p, &old_p->list, list) {
if (list_p->post_handler) {
cleanup_p = 2;
break;
}
}
- if (cleanup_p == 0) {
+ if (cleanup_p == 0)
old_p->post_handler = NULL;
- }
}
}
}
* dereference error. That is why we check whether this node
* really belongs to the hlist.
*/
- if (!(hlist_unhashed(&jp->up.kp.is_hlist))) {
+ if (!(hlist_unhashed(&jp->up.kp.is_hlist)))
hlist_del_rcu(&jp->up.kp.is_hlist);
- }
}
EXPORT_SYMBOL_GPL(__swap_unregister_ujprobe);
return 0;
#endif
- /* TODO: consider to only swap the RA after the last pre_handler fired */
+	/* TODO: consider swapping the RA
+	 * only after the last pre_handler fired */
spin_lock_irqsave(&uretprobe_lock, flags);
/* TODO: test - remove retprobe after func entry but before its exit */
- if ((ri = get_free_urp_inst(rp)) != NULL) {
+ ri = get_free_urp_inst(rp);
+ if (ri != NULL) {
ri->rp = rp;
ri->task = current;
int i, ret = 0;
struct uretprobe_instance *inst;
- DBPRINTF ("START\n");
+ DBPRINTF("START\n");
rp->up.kp.pre_handler = pre_handler_uretprobe;
rp->up.kp.post_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0) {
-#if 1//def CONFIG_PREEMPT
+#if 1 /* def CONFIG_PREEMPT */
rp->maxactive = max(10, 2 * NR_CPUS);
#else
rp->maxactive = NR_CPUS;
* @param task Pointer to the child task struct.
* @return 0
*/
-int swap_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task)
+int swap_disarm_urp_inst_for_task(struct task_struct *parent,
+ struct task_struct *task)
{
unsigned long flags;
struct uretprobe_instance *ri;
head = uretprobe_inst_table_head(parent->mm);
swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (parent == ri->task) {
+ if (parent == ri->task)
arch_disarm_urp_inst(ri, task);
- }
}
spin_unlock_irqrestore(&uretprobe_lock, flags);
head = uretprobe_inst_table_head(task->mm);
swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task == task) {
- printk("%s (%d/%d): pending urp inst: %08lx\n",
+ printk(KERN_INFO "%s (%d/%d): pending urp inst: %08lx\n",
task->comm, task->tgid, task->pid,
(unsigned long)ri->rp->up.kp.addr);
arch_disarm_urp_inst(ri, task);
struct uretprobe_instance *ri;
__swap_unregister_uprobe(&rp->up, disarm);
- spin_lock_irqsave (&uretprobe_lock, flags);
+ spin_lock_irqsave(&uretprobe_lock, flags);
while ((ri = get_used_urp_inst(rp)) != NULL) {
if (arch_disarm_urp_inst(ri, ri->task) != 0)
- printk("%s (%d/%d): cannot disarm urp instance (%08lx)\n",
- ri->task->comm, ri->task->tgid, ri->task->pid,
- (unsigned long)rp->up.kp.addr);
+ printk(KERN_INFO "%s (%d/%d): "
+ "cannot disarm urp instance (%08lx)\n",
+ ri->task->comm, ri->task->tgid, ri->task->pid,
+ (unsigned long)rp->up.kp.addr);
recycle_urp_inst(ri);
}
if (hlist_empty(&rp->used_instances)) {
struct kprobe *p = &rp->up.kp;
- if (!(hlist_unhashed(&p->is_hlist))) {
+ if (!(hlist_unhashed(&p->is_hlist)))
hlist_del_rcu(&p->is_hlist);
- }
}
while ((ri = get_used_urp_inst(rp)) != NULL) {
head = &uprobe_table[i];
swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
if (kp2up(p)->task->tgid == task->tgid) {
- struct uprobe *up = container_of(p, struct uprobe, kp);
- printk("%s: delete uprobe at %p[%lx] for "
- "%s/%d\n", __func__, p->addr,
+ struct uprobe *up =
+ container_of(p, struct uprobe, kp);
+ printk(KERN_INFO "%s: delete uprobe at %p[%lx]"
+ " for %s/%d\n", __func__, p->addr,
(unsigned long)p->opcode,
task->comm, task->pid);
swap_unregister_uprobe(up);
SWAP_LIGHT_INIT_MODULE(once, swap_arch_init_uprobes, swap_arch_exit_uprobes,
NULL, NULL);
-MODULE_LICENSE ("GPL");
+MODULE_LICENSE("GPL");
/**
* @brief Uprobe pre-entry handler.
*/
-typedef unsigned long (*uprobe_pre_entry_handler_t)(void *priv_arg, struct pt_regs * regs);
+typedef unsigned long (*uprobe_pre_entry_handler_t)(void *priv_arg,
+ struct pt_regs *regs);
/**
* @struct ujprobe
* @brief Stores ujprobe data, based on uprobe.
*/
struct ujprobe {
- struct uprobe up; /**< Uprobe for this ujprobe */
- void *entry; /**< Probe handling code to jump to */
+ struct uprobe up; /**< Uprobe for this ujprobe */
+ void *entry; /**< Probe handling code to jump to */
/** Handler which will be called before 'entry' */
uprobe_pre_entry_handler_t pre_entry;
- void *priv_arg; /**< Private args for handler */
- char *args; /**< Function args format string */
+ void *priv_arg; /**< Private args for handler */
+ char *args; /**< Function args format string */
};
struct uretprobe_instance;
/**
* @brief Uretprobe handler.
*/
-typedef int (*uretprobe_handler_t)(struct uretprobe_instance *, struct pt_regs *);
+typedef int (*uretprobe_handler_t)(struct uretprobe_instance *,
+ struct pt_regs *);
/**
* @strict uretprobe
void swap_ujprobe_return(void);
struct kprobe *get_ukprobe(void *addr, pid_t tgid);
-struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs);
+struct kprobe *get_ukprobe_by_insn_slot(void *addr,
+ pid_t tgid,
+ struct pt_regs *regs);
static inline struct uprobe *kp2up(struct kprobe *p)
{
* ============================================================================
*/
-static struct dentry *us_manager_dir = NULL;
+static struct dentry *us_manager_dir;
/**
* @brief Destroy debugfs for us_manager
if (us_manager_dir == NULL)
return -ENOMEM;
- dentry = debugfs_create_file(US_MANAGER_TASKS, 0600, us_manager_dir, NULL,
- &fops_tasks);
+ dentry = debugfs_create_file(US_MANAGER_TASKS, 0600, us_manager_dir,
+ NULL, &fops_tasks);
if (dentry == NULL)
goto fail;
ret = swap_register_kretprobe(&mf_kretprobe);
if (ret)
- printk("swap_register_kretprobe(handle_mm_fault) ret=%d!\n",
+ printk(KERN_INFO "swap_register_kretprobe(handle_mm_fault) ret=%d!\n",
ret);
return ret;
ret = swap_register_kprobe(&ctx_task_kprobe);
if (ret)
- printk("swap_register_kprobe(workaround) ret=%d!\n", ret);
+ printk(KERN_INFO "swap_register_kprobe(workaround) ret=%d!\n",
+ ret);
return ret;
}
*/
static atomic_t copy_process_cnt = ATOMIC_INIT(0);
-static void recover_child(struct task_struct *child_task, struct sspt_proc *proc)
+static void recover_child(struct task_struct *child_task,
+ struct sspt_proc *proc)
{
sspt_proc_uninstall(proc, child_task, US_DISARM);
swap_disarm_urp_inst_for_task(current, child_task);
/* Delete uprobs in children at fork */
static int ret_handler_cp(struct kretprobe_instance *ri, struct pt_regs *regs)
{
- struct task_struct *task = (struct task_struct *)regs_return_value(regs);
+ struct task_struct *task =
+ (struct task_struct *)regs_return_value(regs);
- if(!task || IS_ERR(task))
+ if (!task || IS_ERR(task))
goto out;
- if(task->mm != current->mm) { /* check flags CLONE_VM */
+ if (task->mm != current->mm) { /* check flags CLONE_VM */
rm_uprobes_child(task);
}
out:
ret = swap_register_kretprobe(&cp_kretprobe);
if (ret)
- printk("swap_register_kretprobe(copy_process) ret=%d!\n", ret);
+ printk(KERN_INFO
+ "swap_register_kretprobe(copy_process) ret=%d!\n", ret);
return ret;
}
ret = swap_register_kprobe(&mr_kprobe);
if (ret)
- printk("swap_register_kprobe(mm_release) ret=%d!\n", ret);
+ printk(KERN_INFO
+ "swap_register_kprobe(mm_release) ret=%d!\n", ret);
return ret;
}
if (file->vm_start >= end)
continue;
- if (file->vm_start >= start) {
+ if (file->vm_start >= start)
sspt_file_uninstall(file, task, US_UNINSTALL);
- } else {
- /* TODO: uninstall pages: start..file->vm_end */
- }
+		/* TODO: else: uninstall pages start..file->vm_end */
}
sspt_proc_insert_files(proc, &head);
ret = swap_register_kretprobe(&unmap_kretprobe);
if (ret)
- printk("swap_register_kprobe(do_munmap) ret=%d!\n", ret);
+ printk(KERN_INFO "swap_register_kprobe(do_munmap) ret=%d!\n",
+ ret);
return ret;
}
ret = swap_register_kretprobe(&mmap_kretprobe);
if (ret)
- printk("swap_register_kretprobe(do_mmap_pgoff) ret=%d!\n", ret);
+ printk(KERN_INFO "swap_register_kretprobe(do_mmap_pgoff) ret=%d!\n",
+ ret);
return ret;
}
struct task_struct *task;
};
-static int entry_handler_comm(struct kretprobe_instance *ri, struct pt_regs *regs)
+static int entry_handler_comm(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
{
struct comm_data *data = (struct comm_data *)ri->data;
ret = swap_register_kretprobe(&comm_kretprobe);
if (ret)
- printk("swap_register_kretprobe(set_task_comm) ret=%d!\n",
+ printk(KERN_INFO "swap_register_kretprobe(set_task_comm) ret=%d!\n",
ret);
return ret;
return 0;
not_found:
- printk("ERROR: symbol '%s' not found\n", sym);
+ printk(KERN_INFO "ERROR: symbol '%s' not found\n", sym);
return -ESRCH;
}
ip = find_img_ip(file, addr);
if (ip == NULL) {
- printk("Warning: no ip found in img, addr = %lx\n", addr);
+ printk(KERN_INFO "Warning: no ip found in img, addr = %lx\n",
+ addr);
return -EINVAL;
}
{
struct img_ip *ip;
- printk("### d_iname=%s\n", file->dentry->d_iname);
+ printk(KERN_INFO "### d_iname=%s\n", file->dentry->d_iname);
list_for_each_entry(ip, &file->ip_list, list) {
img_ip_print(ip);
/* debug */
void img_ip_print(struct img_ip *ip)
{
- printk("### addr=8%lx, args=%s\n", ip->addr, ip->args);
+ printk(KERN_INFO "### addr=8%lx, args=%s\n",
+ ip->addr, ip->args);
}
/* debug */
list_del(&file->list);
}
-static struct img_file *find_img_file(struct img_proc *proc, struct dentry *dentry)
+static struct img_file *find_img_file(struct img_proc *proc,
+ struct dentry *dentry)
{
struct img_file *file;
ret = img_file_add_ip(file, addr, args, ret_type);
if (ret) {
- printk("Cannot add ip to img file\n");
+ printk(KERN_INFO "Cannot add ip to img file\n");
free_img_file(file);
- }
- else
+ } else {
img_add_file_by_list(proc, file);
+ }
return ret;
}
* @param args Function address
* @return Error code
*/
-int img_proc_del_ip(struct img_proc *proc, struct dentry *dentry, unsigned long addr)
+int img_proc_del_ip(struct img_proc *proc,
+ struct dentry *dentry,
+ unsigned long addr)
{
int ret;
struct img_file *file;
{
struct img_file *file;
- printk("### img_proc_print:\n");
+ printk(KERN_INFO "### img_proc_print:\n");
list_for_each_entry(file, &proc->file_list, list) {
img_file_print(file);
}
int img_proc_add_ip(struct img_proc *proc, struct dentry *dentry,
unsigned long addr, const char *args, char ret_type);
-int img_proc_del_ip(struct img_proc *proc, struct dentry *dentry, unsigned long addr);
+int img_proc_del_ip(struct img_proc *proc,
+ struct dentry *dentry,
+ unsigned long addr);
/* debug */
void img_proc_print(struct img_proc *proc);
}
if (proc) {
- if (pfg_first) {
+ if (pfg_first)
first_install(task, proc, pfg_first);
- } else {
+ else
subsequent_install(task, proc, page_addr);
- }
}
}
struct vm_area_struct *vma;
struct mm_struct *mm = task->mm;
- if (mm == NULL) {
+ if (mm == NULL)
return 0;
- }
for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (check_vma(vma) && vma->vm_file->f_dentry == dentry) {
+ if (check_vma(vma) && vma->vm_file->f_dentry == dentry)
return 1;
- }
}
return 0;
* @param filter Pointer to the proc_filter struct
* @param task Pointer to the task_struct struct
*/
-#define check_task_f(filter, task) (filter)->call(filter, task)
+#define check_task_f(filter, task) ((filter)->call(filter, task))
void set_pf_by_dentry(struct proc_filter *pf, struct dentry *dentry,
void *priv);
ip->retprobe.handler = ret_handler;
ip->retprobe.entry_handler = entry_handler;
} else {
- printk("Cannot kmalloc in create_ip function!\n");
+ printk(KERN_INFO "Cannot kmalloc in create_ip function!\n");
}
return ip;
unsigned long addr = (unsigned long)ip->retprobe.up.kp.addr;
unsigned long offset = addr - file->vm_start;
- printk("swap_register_uretprobe() failure %d (%s:%lx|%lx)\n",
- ret, name, offset, (unsigned long)ip->retprobe.up.kp.opcode);
+ printk(KERN_INFO "swap_register_uretprobe() failure %d "
+ "(%s:%lx|%lx)\n", ret, name, offset,
+ (unsigned long)ip->retprobe.up.kp.opcode);
}
return ret;
return 0;
}
-static inline int sspt_unregister_usprobe(struct task_struct *task, struct us_ip *ip, enum US_FLAGS flag)
+static inline int sspt_unregister_usprobe(struct task_struct *task,
+ struct us_ip *ip,
+ enum US_FLAGS flag)
{
int err = 0;
static inline void print_jprobe(struct jprobe *jp)
{
- printk("### JP: entry=%lx, pre_entry=%lx\n",
+ printk(KERN_INFO "### JP: entry=%lx, pre_entry=%lx\n",
(unsigned long)jp->entry, (unsigned long)jp->pre_entry);
}
static inline void print_retprobe(struct uretprobe *rp)
{
- printk("### RP: handler=%lx\n",
+ printk(KERN_INFO "### RP: handler=%lx\n",
(unsigned long)rp->handler);
}
static inline void print_ip(struct us_ip *ip, int i)
{
- printk("### addr[%2d]=%lx, R_addr=%lx\n",
+ printk(KERN_INFO "### addr[%2d]=%lx, R_addr=%lx\n",
i, (unsigned long)ip->offset,
(unsigned long)ip->retprobe.up.kp.addr);
print_retprobe(&ip->retprobe);
int i = 0;
struct us_ip *ip;
- printk("### offset=%lx\n", page->offset);
- printk("### no install:\n");
+ printk(KERN_INFO "### offset=%lx\n", page->offset);
+ printk(KERN_INFO "### no install:\n");
list_for_each_entry(ip, &page->ip_list_no_inst, list) {
print_ip(ip, i);
++i;
}
- printk("### install:\n");
+ printk(KERN_INFO "### install:\n");
list_for_each_entry(ip, &page->ip_list_inst, list) {
print_ip(ip, i);
++i;
DECLARE_NODE_PTR_FOR_HLIST(node);
if (file == NULL) {
- printk("### file_p == NULL\n");
+ printk(KERN_INFO "### file_p == NULL\n");
return;
}
table_size = (1 << file->page_probes_hash_bits);
name = (file->dentry) ? file->dentry->d_iname : NA;
- printk("### print_file_probes: path=%s, d_iname=%s, table_size=%lu, vm_start=%lx\n",
- file->dentry->d_iname, name, table_size, file->vm_start);
+ printk(KERN_INFO "### print_file_probes: path=%s, d_iname=%s, "
+ "table_size=%lu, vm_start=%lx\n",
+ file->dentry->d_iname, name, table_size, file->vm_start);
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
{
struct sspt_file *file;
- printk("### print_proc_probes\n");
+ printk(KERN_INFO "### print_proc_probes\n");
list_for_each_entry(file, &proc->file_list, list) {
print_file_probes(file);
}
- printk("### print_proc_probes\n");
+ printk(KERN_INFO "### print_proc_probes\n");
}
/*
int i;
int cnt = task_inst_info->libs_count;
printk( "### BUNDLE PRINT START ###\n");
- printk("\n### BUNDLE PRINT START ###\n");
- printk("### task_inst_info.libs_count=%d\n", cnt);
+ printk(KERN_INFO "\n### BUNDLE PRINT START ###\n");
+ printk(KERN_INFO "### task_inst_info.libs_count=%d\n", cnt);
for (i = 0; i < cnt; ++i) {
int j;
us_proc_lib_t *lib = &task_inst_info->p_libs[i];
int cnt_j = lib->ips_count;
char *path = lib->path;
- printk("### path=%s, cnt_j=%d\n", path, cnt_j);
+ printk(KERN_INFO "### path=%s, cnt_j=%d\n", path, cnt_j);
for (j = 0; j < cnt_j; ++j) {
us_proc_ip_t *ips = &lib->p_ips[j];
unsigned long offset = ips->offset;
- printk("### offset=%lx\n", offset);
+ printk(KERN_INFO "### offset=%lx\n", offset);
}
}
- printk("### BUNDLE PRINT END ###\n");
+ printk(KERN_INFO "### BUNDLE PRINT END ###\n");
}
*/
static DEFINE_SPINLOCK(feature_img_lock);
static LIST_HEAD(feature_img_list);
-static struct sspt_feature_data *create_feature_data(struct sspt_feature_img *img)
+static struct sspt_feature_data *create_feature_data(
+ struct sspt_feature_img *img)
{
struct sspt_feature_data *fd;
struct sspt_feature_img *fi;
fi = kmalloc(sizeof(*fi), GFP_ATOMIC);
- if(fi) {
+ if (fi) {
INIT_LIST_HEAD(&fi->list);
fi->alloc = alloc;
fi->free = free;
* @param free Callback to release data
* @return Feature ID
*/
-sspt_feature_id_t sspt_register_feature(void *(*alloc)(void), void (*free)(void *data))
+sspt_feature_id_t sspt_register_feature(void *(*alloc)(void),
+ void (*free)(void *data))
{
struct sspt_feature_img *fi;
struct sspt_feature;
-typedef void * sspt_feature_id_t; /**< @brief sspt feature ID type */
+typedef void *sspt_feature_id_t; /**< @brief sspt feature ID type */
#define SSPT_FEATURE_ID_BAD NULL /**< @def SSPT_FEATURE_ID_BAD */
struct sspt_feature *sspt_create_feature(void);
static int calculation_hash_bits(int cnt)
{
int bits;
- for (bits = 1; cnt >>= 1; ++bits);
+ for (bits = 1; cnt >>= 1; ++bits)
+ ;
return bits;
}
obj->vm_start = 0;
obj->vm_end = 0;
- obj->page_probes_hash_bits = calculation_hash_bits(page_cnt);//PAGE_PROBES_HASH_BITS;
+ obj->page_probes_hash_bits = calculation_hash_bits(page_cnt);
table_size = (1 << obj->page_probes_hash_bits);
- obj->page_probes_table = kmalloc(sizeof(*obj->page_probes_table)*table_size, GFP_ATOMIC);
+ obj->page_probes_table =
+ kmalloc(sizeof(*obj->page_probes_table)*table_size,
+ GFP_ATOMIC);
- for (i = 0; i < table_size; ++i) {
+ for (i = 0; i < table_size; ++i)
INIT_HLIST_HEAD(&obj->page_probes_table[i]);
- }
}
return obj;
static void sspt_add_page(struct sspt_file *file, struct sspt_page *page)
{
page->file = file;
- hlist_add_head(&page->hlist, &file->page_probes_table[hash_ptr((void *)page->offset,
- file->page_probes_hash_bits)]);
+ hlist_add_head(&page->hlist,
+ &file->page_probes_table[hash_ptr(
+ (void *)page->offset,
+ file->page_probes_hash_bits)]);
}
-static struct sspt_page *sspt_find_page(struct sspt_file *file, unsigned long offset)
+static struct sspt_page *sspt_find_page(struct sspt_file *file,
+ unsigned long offset)
{
struct hlist_head *head;
struct sspt_page *page;
DECLARE_NODE_PTR_FOR_HLIST(node);
- head = &file->page_probes_table[hash_ptr((void *)offset, file->page_probes_hash_bits)];
+ head = &file->page_probes_table[hash_ptr((void *)offset,
+ file->page_probes_hash_bits)];
swap_hlist_for_each_entry(page, node, head, hlist) {
- if (page->offset == offset) {
+ if (page->offset == offset)
return page;
- }
}
return NULL;
}
-static struct sspt_page *sspt_find_page_or_new(struct sspt_file *file, unsigned long offset)
+static struct sspt_page *sspt_find_page_or_new(struct sspt_file *file,
+ unsigned long offset)
{
struct sspt_page *page = sspt_find_page(file, offset);
* @param page Page address
* @return Pointer to the sspt_page struct
*/
-struct sspt_page *sspt_find_page_mapped(struct sspt_file *file, unsigned long page)
+struct sspt_page *sspt_find_page_mapped(struct sspt_file *file,
+ unsigned long page)
{
unsigned long offset;
if (file->vm_start > page || file->vm_end < page) {
- // TODO: or panic?!
- printk("ERROR: file_p[vm_start..vm_end] <> page: file_p[vm_start=%lx, vm_end=%lx, d_iname=%s] page=%lx\n",
- file->vm_start, file->vm_end, file->dentry->d_iname, page);
+ /* TODO: or panic?! */
+ printk(KERN_ERR "ERROR: file_p[vm_start..vm_end] <> page: "
+ "file_p[vm_start=%lx, vm_end=%lx, "
+ "d_iname=%s] page=%lx\n",
+ file->vm_start, file->vm_end,
+ file->dentry->d_iname, page);
return NULL;
}
void sspt_file_add_ip(struct sspt_file *file, unsigned long offset,
const char *args, char ret_type)
{
- struct sspt_page *page = sspt_find_page_or_new(file, offset & PAGE_MASK);
+ struct sspt_page *page =
+ sspt_find_page_or_new(file, offset & PAGE_MASK);
- // FIXME: delete ip
+ /* FIXME: delete ip */
struct us_ip *ip = create_ip(offset, args, ret_type);
sspt_add_ip(page, ip);
* @param offset_addr File offset
* @return Pointer to the sspt_page struct
*/
-struct sspt_page *sspt_get_page(struct sspt_file *file, unsigned long offset_addr)
+struct sspt_page *sspt_get_page(struct sspt_file *file,
+ unsigned long offset_addr)
{
unsigned long offset = offset_addr & PAGE_MASK;
struct sspt_page *page = sspt_find_page_or_new(file, offset);
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
swap_hlist_for_each_entry_safe(page, node, tmp, head, hlist) {
- if (sspt_page_is_installed(page)) {
+ if (sspt_page_is_installed(page))
return 1;
- }
}
}
* @param flag Action for probes
* @return Void
*/
-int sspt_file_uninstall(struct sspt_file *file, struct task_struct *task, enum US_FLAGS flag)
+int sspt_file_uninstall(struct sspt_file *file,
+ struct task_struct *task,
+ enum US_FLAGS flag)
{
int i, err = 0;
int table_size = (1 << file->page_probes_hash_bits);
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- swap_hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe(page, node, tmp, head, hlist) {
err = sspt_unregister_page(page, flag, task);
if (err != 0) {
- printk("ERROR sspt_file_uninstall: err=%d\n", err);
+ printk(KERN_ERR "ERROR sspt_file_uninstall: "
+ "err=%d\n", err);
return err;
}
}
}
- if (flag != US_DISARM) {
+ if (flag != US_DISARM)
file->loaded = 0;
- }
return err;
}
{
file->vm_start = vma->vm_start;
file->vm_end = vma->vm_end;
-
-// ptr_pack_task_event_info(task, DYN_LIB_PROBE_ID, RECORD_ENTRY, "dspdd",
-// task->tgid, file->dentry->d_iname, vma->vm_start,
-// vma->vm_end - vma->vm_start, 0);
}
void sspt_file_add_ip(struct sspt_file *file, unsigned long offset,
const char *args, char ret_type);
-struct sspt_page *sspt_get_page(struct sspt_file *file, unsigned long offset_addr);
+struct sspt_page *sspt_get_page(struct sspt_file *file,
+ unsigned long offset_addr);
void sspt_put_page(struct sspt_page *page);
int sspt_file_check_install_pages(struct sspt_file *file);
void sspt_file_install(struct sspt_file *file);
-int sspt_file_uninstall(struct sspt_file *file, struct task_struct *task, enum US_FLAGS flag);
+int sspt_file_uninstall(struct sspt_file *file,
+ struct task_struct *task,
+ enum US_FLAGS flag);
void sspt_file_set_mapping(struct sspt_file *file, struct vm_area_struct *vma);
#endif /* __SSPT_FILE__ */
if (list_empty(&page->ip_list_no_inst)) {
struct task_struct *task = page->file->proc->task;
- printk("page %lx in %s task[tgid=%u, pid=%u] already installed\n",
- page->offset, file->dentry->d_iname, task->tgid, task->pid);
+ printk(KERN_INFO "page %lx in %s task[tgid=%u, pid=%u] "
+ "already installed\n",
+ page->offset, file->dentry->d_iname,
+ task->tgid, task->pid);
goto unlock;
}
list_for_each_entry(ip, &ip_list_tmp, list) {
err = sspt_unregister_usprobe(task, ip, flag);
if (err != 0) {
- //TODO: ERROR
+ /* TODO: ERROR */
break;
}
}
- head = (flag == US_DISARM) ? &page->ip_list_inst : &page->ip_list_no_inst;
+ head = (flag == US_DISARM) ?
+ &page->ip_list_inst : &page->ip_list_no_inst;
spin_lock(&page->lock);
static LIST_HEAD(proc_probes_list);
static DEFINE_RWLOCK(sspt_proc_rwlock);
-void sspt_proc_del_all_filters(struct sspt_proc *proc);
/**
* @brief Global read lock for sspt_proc
struct sspt_proc *proc, *tmp;
list_for_each_entry_safe(proc, tmp, &proc_probes_list, list) {
- if (proc->tgid == task->tgid) {
+ if (proc->tgid == task->tgid)
return proc;
- }
}
return NULL;
void *priv)
{
struct sspt_proc *proc = sspt_proc_get_by_task(task);
- if (proc == NULL) {
+ if (proc == NULL)
proc = sspt_proc_create(task, priv);
- }
return proc;
}
* @param dentry Dentry of file
* @return Pointer on the sspt_file struct
*/
-struct sspt_file *sspt_proc_find_file(struct sspt_proc *proc, struct dentry *dentry)
+struct sspt_file *sspt_proc_find_file(struct sspt_proc *proc,
+ struct dentry *dentry)
{
struct sspt_file *file;
list_for_each_entry(file, &proc->file_list, list) {
- if (dentry == file->dentry) {
+ if (dentry == file->dentry)
return file;
- }
}
return NULL;
}
page = sspt_find_page_mapped(file, page_addr);
- if (page) {
+ if (page)
sspt_register_page(page, file);
- }
}
}
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (check_vma(vma)) {
struct dentry *dentry = vma->vm_file->f_dentry;
- struct sspt_file *file = sspt_proc_find_file(proc, dentry);
+ struct sspt_file *file =
+ sspt_proc_find_file(proc, dentry);
if (file) {
if (!file->loaded) {
file->loaded = 1;
* @param flag Action for probes
* @return Error code
*/
-int sspt_proc_uninstall(struct sspt_proc *proc, struct task_struct *task, enum US_FLAGS flag)
+int sspt_proc_uninstall(struct sspt_proc *proc,
+ struct task_struct *task,
+ enum US_FLAGS flag)
{
int err = 0;
struct sspt_file *file;
list_for_each_entry_rcu(file, &proc->file_list, list) {
err = sspt_file_uninstall(file, task, flag);
if (err != 0) {
- printk("ERROR sspt_proc_uninstall: err=%d\n", err);
+ printk(KERN_ERR "ERROR sspt_proc_uninstall: err=%d\n",
+ err);
return err;
}
}
void *priv);
void sspt_proc_free_all(void);
-struct sspt_file *sspt_proc_find_file(struct sspt_proc *proc, struct dentry *dentry);
+struct sspt_file *sspt_proc_find_file(struct sspt_proc *proc,
+ struct dentry *dentry);
struct sspt_file *sspt_proc_find_file_or_new(struct sspt_proc *proc,
struct dentry *dentry);
void sspt_proc_install_page(struct sspt_proc *proc, unsigned long page_addr);
void sspt_proc_install(struct sspt_proc *proc);
-int sspt_proc_uninstall(struct sspt_proc *proc, struct task_struct *task, enum US_FLAGS flag);
+int sspt_proc_uninstall(struct sspt_proc *proc,
+ struct task_struct *task,
+ enum US_FLAGS flag);
int sspt_proc_get_files_by_region(struct sspt_proc *proc,
struct list_head *head,
int ret = 0;
if (usm_get_status() == ST_OFF) {
- printk("US instrumentation is not running!\n");
+ printk(KERN_INFO "US instrumentation is not running!\n");
ret = -EINVAL;
goto put;
}
st = usm_get_status();
if (st == ST_ON) {
- printk("US instrumentation is already run!\n");
+ printk(KERN_INFO "US instrumentation is already run!\n");
goto put;
}
SWAP_LIGHT_INIT_MODULE(once_helper, init_us_manager, exit_us_manager,
init_debugfs_us_manager, exit_debugfs_us_manager);
-MODULE_LICENSE ("GPL");
+MODULE_LICENSE("GPL");
static void sm_free_us(struct slot_manager *sm, void *ptr)
{
/*
- * E. G.: This code provides kernel dump because of rescheduling while atomic.
- * As workaround, this code was commented. In this case we will have memory leaks
- * for instrumented process, but instrumentation process should functionate correctly.
- * Planned that good solution for this problem will be done during redesigning KProbe
- * for improving supportability and performance.
+ * E. G.: This code provides kernel dump because of rescheduling while
+ * atomic. As workaround, this code was commented. In this case we will
+ * have memory leaks for instrumented process, but instrumentation
+ * process should functionate correctly. Planned that good solution for
+ * this problem will be done during redesigning KProbe for improving
+ * supportability and performance.
*/
#if 0
struct task_struct *task = sm->data;
* === BUFFER ===
* ============================================================================
*/
-static char *common_buf = NULL;
+static char *common_buf;
enum { subbuf_size = 8*1024 };
enum { common_buf_size = subbuf_size * NR_CPUS };
* === INIT/EXIT ===
* ============================================================================
*/
-static struct dentry *writer_dir = NULL;
+static struct dentry *writer_dir;
/**
* @brief Removes writer debugfs.
if (dentry == NULL)
goto fail;
- dentry = debugfs_create_file("available_filters", 0600, writer_dir, NULL, &fops_available_filters);
+ dentry = debugfs_create_file("available_filters", 0600, writer_dir,
+ NULL, &fops_available_filters);
if (dentry == NULL)
goto fail;
- dentry = debugfs_create_file("filter", 0600, writer_dir, NULL, &fops_filter);
+ dentry = debugfs_create_file("filter", 0600,
+ writer_dir, NULL, &fops_filter);
if (dentry == NULL)
goto fail;
*/
void event_filter_exit(void)
{
- event_filter_unregister(&filter_none);;
+ event_filter_unregister(&filter_none);
}
/* Regs manipulations */
#if defined(CONFIG_ARM)
-#define get_regs_ip(regs) regs->ARM_pc /**< Get pc reg. */
-#define get_regs_ret_func(regs) regs->ARM_lr /**< Get lr reg. */
-#define get_regs_ret_val(regs) regs->ARM_r0 /**< Get ret val. */
-#define get_regs_stack_ptr(regs) regs->ARM_sp /**< Get stack pointer. */
+#define get_regs_ip(regs) (regs->ARM_pc) /**< Get pc reg. */
+#define get_regs_ret_func(regs) (regs->ARM_lr) /**< Get lr reg. */
+#define get_regs_ret_val(regs) (regs->ARM_r0) /**< Get ret val. */
+#define get_regs_stack_ptr(regs) (regs->ARM_sp) /**< Get stack pointer. */
#elif defined(CONFIG_X86_32)
-#define get_regs_ip(regs) regs->ip - 1 /**< Get ip. */
-#define get_regs_ret_val(regs) regs->ax /**< Get ret val. */
-#define get_regs_stack_ptr(regs) regs->sp /**< Get stack pointer. */
+#define get_regs_ip(regs) (regs->ip - 1) /**< Get ip. */
+#define get_regs_ret_val(regs) (regs->ax) /**< Get ret val. */
+#define get_regs_stack_ptr(regs) (regs->sp) /**< Get stack pointer. */
static inline u32 get_regs_ret_func(struct pt_regs *regs)
{
if (user_mode(regs)) {
sp = (u32 *)regs->sp;
if (get_user(addr, sp))
- printk("failed to dereference a pointer, sp=%p, "
+ printk(KERN_INFO "failed to dereference a pointer, sp=%p, "
"pc=%lx\n", sp, get_regs_ip(regs));
} else {
sp = (u32 *)kernel_stack_pointer(regs);
/* Get first 4 args from registers */
switch (args_in_regs) {
- case 3:
- args[3] = regs->ARM_r3;
- case 2:
- args[2] = regs->ARM_r2;
- case 1:
- args[1] = regs->ARM_r1;
- case 0:
- args[0] = regs->ARM_r0;
+ case 3:
+ args[3] = regs->ARM_r3;
+ case 2:
+ args[2] = regs->ARM_r2;
+ case 1:
+ args[1] = regs->ARM_r1;
+ case 0:
+ args[0] = regs->ARM_r0;
}
/* Get other args from stack */
unsigned long *args_in_sp = (unsigned long *)regs->ARM_sp +
i - stack_args;
if (get_user(args[i], args_in_sp))
- printk("failed to dereference a pointer, addr=%p\n",
+ printk(KERN_INFO "failed to dereference a pointer, addr=%p\n",
args_in_sp);
}
* address should be put.
* @return Pointer to the string with shared mem area name.
*/
-const char *get_shared_kmem(struct mm_struct *mm, unsigned long *start,
- unsigned long *end)
+const char *get_shared_kmem(struct mm_struct *mm,
+ unsigned long *start,
+ unsigned long *end)
{
*start = CONFIG_VECTORS_BASE;
*end = CONFIG_VECTORS_BASE + PAGE_SIZE;
stack_args = 6;
switch (args_in_regs) {
- case 5:
- args[5] = regs->bp;
- case 4:
- args[4] = regs->di;
- case 3:
- args[3] = regs->si;
- case 2:
- args[2] = regs->dx;
- case 1:
- args[1] = regs->cx;
- case 0:
- args[0] = regs->bx;
+ case 5:
+ args[5] = regs->bp;
+ case 4:
+ args[4] = regs->di;
+ case 3:
+ args[3] = regs->si;
+ case 2:
+ args[2] = regs->dx;
+ case 1:
+ args[1] = regs->cx;
+ case 0:
+ args[0] = regs->bx;
}
}
unsigned long *args_in_sp = (unsigned long *)regs->sp +
1 + i - stack_args;
if (get_user(args[i], args_in_sp))
- printk("failed to dereference a pointer, addr=%p\n",
+ printk(KERN_INFO "failed to dereference a pointer, addr=%p\n",
args_in_sp);
}
};
static char *cpu_buf[NR_CPUS];
-static u32 seq_num = 0;
-static unsigned int discarded = 0;
+static u32 seq_num;
+static unsigned int discarded;
/**
* @brief Initializes new message.
u64 time; /**< Message time */
u32 len; /**< Message length */
char payload[0]; /**< Message itself */
-} __attribute__((packed));
+} __packed;
#if 0 /* debug */
static void print_hex(char *ptr, int len)
{
int i;
- printk("print_hex:\n");
- for (i = 0; i < len; ++i) {
- printk("[%x] [%3d]=%2x\n", &ptr[i], i, ptr[i]);
- }
+ printk(KERN_INFO "print_hex:\n");
+ for (i = 0; i < len; ++i)
+ printk(KERN_INFO "[%x] [%3d]=%2x\n", &ptr[i], i, ptr[i]);
}
#endif
struct basic_msg_fmt *bmf = (struct basic_msg_fmt *)data;
result = swap_buffer_write(bmf, bmf->len + sizeof(*bmf));
- if (result < 0) {
+ if (result < 0)
discarded++;
- }
return result;
}
bmf->time = timespec2time(&ts);
}
-static char* pack_basic_msg_fmt(char *buf, enum MSG_ID id)
+static char *pack_basic_msg_fmt(char *buf, enum MSG_ID id)
{
struct basic_msg_fmt *bmf = (struct basic_msg_fmt *)buf;
struct proc_info_top {
u32 pid; /**< Process PID */
char comm[0]; /**< Message */
-} __attribute__((packed));
+} __packed;
/**
* @struct proc_info_bottom
u64 low_addr; /**< Low address */
u64 high_addr; /**< High address */
char bin_path[0]; /**< Binary path */
-} __attribute__((packed));
+} __packed;
/**
* @struct proc_info_part
struct proc_info_part {
u32 lib_cnt; /**< Library count */
char libs[0]; /**< Libraries */
-} __attribute__((packed));
+} __packed;
/**
* @struct lib_obj
u64 low_addr; /**< Low library address */
u64 high_addr; /**< High library address */
char lib_path[0]; /**< Library path */
-} __attribute__((packed));
+} __packed;
static char *pack_path(char *buf, struct file *file)
{
!(vma->vm_flags & (VM_READ | VM_MAYREAD)));
}
-static struct vm_area_struct *find_vma_exe_by_dentry(struct mm_struct *mm, struct dentry *dentry)
+static struct vm_area_struct *find_vma_exe_by_dentry(struct mm_struct *mm,
+ struct dentry *dentry)
{
struct vm_area_struct *vma;
return vma;
}
-static char *pack_shared_kmem(char *lib_obj, struct mm_struct *mm,
- u32 *lib_cnt_p)
+static char *pack_shared_kmem(char *lib_obj,
+ struct mm_struct *mm,
+ u32 *lib_cnt_p)
{
struct lib_obj *so = (struct lib_obj *)lib_obj;
char *so_obj;
*/
struct proc_terminate {
u32 pid; /**< Process ID */
-} __attribute__((packed));
+} __packed;
static char *pack_proc_terminate(char *payload, struct task_struct *task)
{
u64 low_addr; /**< Low address */
u64 high_addr; /**< High address */
char bin_path[0]; /**< Binary path */
-} __attribute__((packed));
+} __packed;
static char *pack_proc_map(char *payload, struct vm_area_struct *vma)
{
u32 pid; /**< Process ID */
u64 low_addr; /**< Low address */
u64 high_addr; /**< High address */
-} __attribute__((packed));
+} __packed;
static char *pack_proc_unmap(char *payload, unsigned long start,
unsigned long end)
struct proc_comm {
u32 pid; /**< Process ID */
char comm[0]; /**< Comm */
-} __attribute__((packed));
+} __packed;
static char *pack_proc_comm(char *data, struct task_struct *task)
{
- struct proc_comm *pcomm= (struct proc_comm *)data;
+ struct proc_comm *pcomm = (struct proc_comm *)data;
pcomm->pid = task->tgid;
u64 pc_addr; /**< Instruction pointer address */
u32 tid; /**< Thread ID */
u32 cpu_num; /**< CPU number */
-} __attribute__((packed));
+} __packed;
static char *pack_sample(char *payload, struct pt_regs *regs)
{
u32 cpu_num; /**< CPU number */
u32 cnt_args; /**< Count of args */
char args[0]; /**< Args format string */
-} __attribute__((packed));
+} __packed;
static char *pack_msg_func_entry(char *payload, const char *fmt,
unsigned long func_addr, struct pt_regs *regs,
/* FIXME: len = 1024 */
ret = pack_args(args, 1024, fmt, regs);
if (ret < 0) {
- printk("ERROR: !!!!!\n");
+ printk(KERN_ERR "ERROR: !!!!!\n");
goto put_buf;
}
u64 caller_pc_addr; /**< Return address */
u32 cpu_num; /**< CPU number */
char ret_val[0]; /**< Return value */
-} __attribute__((packed));
+} __packed;
static int pack_msg_ret_val(char *buf, int len, char ret_type,
struct pt_regs *regs)
ret = pack_msg_ret_val(mfe->ret_val, len, ret_type, regs);
if (ret < 0) {
- printk("ERROR: packing MSG_FUNCTION_EXIT (ret=%d)\n", ret);
+ printk(KERN_ERR "ERROR: packing MSG_FUNCTION_EXIT (ret=%d)\n",
+ ret);
return ret;
}
u32 pid; /**< PID */
u32 tid; /**< TID */
u32 cpu_num; /**< CPU number */
-} __attribute__((packed));
+} __packed;
static char *pack_msg_context_switch(char *payload, struct pt_regs *regs)
{
*/
struct msg_err {
char msg[0]; /**< Error message string */
-} __attribute__((packed));
+} __packed;
static char *pack_msg_err(char *payload, const char *fmt, va_list args)
{