2 * Kernel Probes (KProbes)
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Copyright (C) IBM Corporation, 2002, 2004
23 * Dynamic Binary Instrumentation Module based on KProbes
24 * modules/kprobe/dbi_insn_slots.c
26 * This program is free software; you can redistribute it and/or modify
27 * it under the terms of the GNU General Public License as published by
28 * the Free Software Foundation; either version 2 of the License, or
29 * (at your option) any later version.
31 * This program is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 * GNU General Public License for more details.
36 * You should have received a copy of the GNU General Public License
37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
40 * Copyright (C) Samsung Electronics, 2006-2012
42 * 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
43 * Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
44 * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
45 * 2012-2013 Vyacheslav Cherkashin <v.cherkashin@samsung.com> new memory allocator for slots
48 #include "dbi_insn_slots.h"
49 #include <linux/module.h>
50 #include <linux/rculist.h>
51 #include <linux/slab.h>
52 #include <linux/spinlock.h>
/*
 * Free-list bookkeeping fields. NOTE(review): the enclosing struct
 * definitions are elided in this excerpt; from usage below,
 * first_available/count_available belong to struct chunk, while
 * hlist belongs to struct fixed_alloc (the per-page node linked into
 * slot_manager::page_list) — confirm against the full source.
 */
56 unsigned long first_available; /* index of the first free block (head of the index[] free list) */
57 unsigned long count_available; /* number of blocks currently free in this chunk */
66 struct hlist_node hlist; /* linkage into slot_manager::page_list (RCU-traversed) */
/*
 * chunk_init - initialize a fixed-block allocator over a raw memory region.
 * @chunk:      allocator state to set up
 * @data:       backing memory that blocks are carved out of
 * @size:       total size of @data (at the visible call site this is
 *              PAGE_SIZE/sizeof(unsigned long) — i.e. counted in longs,
 *              not bytes; TODO confirm the intended unit)
 * @size_block: size of one allocatable block, same unit as @size
 *
 * Builds an index-based free list: index[i] holds the index of the next
 * free block after block i, with first_available as the list head.
 *
 * NOTE(review): the GFP_ATOMIC kmalloc() of chunk->index is not checked
 * for failure on any line visible here — verify a NULL check exists in
 * the elided lines or add one.
 */
70 static void chunk_init(struct chunk *chunk, void *data, size_t size, size_t size_block)
75 spin_lock_init(&chunk->lock);
76 chunk->data = (unsigned long *)data;
77 chunk->first_available = 0;
78 chunk->count_available = size / size_block;
79 chunk->size = chunk->count_available;
81 chunk->index = kmalloc(sizeof(*chunk->index)*chunk->count_available, GFP_ATOMIC);
/* Loop body elided in this excerpt; presumably it writes the successor
 * index into *p and advances i (e.g. *p = ++i) — TODO confirm, since
 * the visible header only increments p, not i. */
84 for (i = 0; i != chunk->count_available; ++p) {
/*
 * chunk_uninit - release a chunk's bookkeeping. Body elided in this
 * excerpt; presumably kfree(chunk->index) — the backing data page is
 * NOT freed here (free_fixed_alloc() releases it via sm->free()).
 * TODO confirm against the full source.
 */
89 static void chunk_uninit(struct chunk *chunk)
/*
 * chunk_allocate - pop one block of @size_block from @chunk's free list.
 * Returns a pointer into chunk->data, or (presumably, in the elided
 * early-return branch) NULL when the chunk is exhausted.
 *
 * NOTE(review): count_available is tested *before* chunk->lock is taken;
 * two racing allocators could both pass the test and the loser would
 * underflow the free list. Confirm callers serialize externally, or the
 * check belongs inside the critical section.
 */
94 static void* chunk_allocate(struct chunk *chunk, size_t size_block)
98 if (!chunk->count_available) {
102 spin_lock(&chunk->lock);
/* first_available indexes blocks, so scale by size_block to get the
 * element offset into data[]. */
103 ret = chunk->data + chunk->first_available*size_block;
/* Advance the free-list head: index[head] stores the next free block. */
104 chunk->first_available = chunk->index[chunk->first_available];
105 --chunk->count_available;
106 spin_unlock(&chunk->lock);
/*
 * chunk_deallocate - push block @p back onto @chunk's free list.
 * @p must point into chunk->data at a block boundary; its block index
 * is recovered from the pointer offset and linked in as the new
 * free-list head, under chunk->lock.
 */
111 static void chunk_deallocate(struct chunk *chunk, void *p, size_t size_block)
113 unsigned long idx = ((unsigned long *)p - chunk->data)/size_block;
115 spin_lock(&chunk->lock);
/* Classic LIFO free-list push: new head points at the old head. */
116 chunk->index[idx] = chunk->first_available;
117 chunk->first_available = idx;
118 ++chunk->count_available;
119 spin_unlock(&chunk->lock);
/*
 * chunk_check_ptr - test whether @p lies inside this chunk's data region.
 * Returns nonzero membership / zero otherwise (the return statements are
 * elided in this excerpt).
 *
 * NOTE(review): size/sizeof(chunk->data) divides by the size of the
 * POINTER, not the pointee; sizeof(*chunk->data) (i.e. unsigned long)
 * appears to be what is meant. The two coincide on LP64 kernels, which
 * is likely why this has gone unnoticed — verify and fix upstream.
 */
122 static inline int chunk_check_ptr(struct chunk *chunk, void *p, size_t size)
124 if (( chunk->data <= (unsigned long *)p) &&
125 ((chunk->data + size/sizeof(chunk->data)) > (unsigned long *)p)) {
/*
 * chunk_free - predicate: true when every block in @chunk is free
 * (i.e. the chunk is completely unused and its backing page may be
 * released by the caller).
 */
132 static inline int chunk_free(struct chunk *chunk)
134 return (chunk->count_available == chunk->size);
/*
 * create_fixed_alloc - allocate one fixed_alloc node plus a backing page
 * from @sm and initialize its embedded chunk allocator.
 *
 * NOTE(review): failure checks for kmalloc() and sm->alloc() are not on
 * any line visible in this excerpt — presumably they sit in the elided
 * lines (returning NULL and freeing fa); confirm before relying on a
 * non-NULL return.
 *
 * The chunk size is passed as PAGE_SIZE/sizeof(unsigned long), i.e. the
 * page measured in longs — this must stay consistent with the unit
 * chunk_init()/chunk_check_ptr() expect.
 */
137 static struct fixed_alloc *create_fixed_alloc(struct slot_manager *sm)
140 struct fixed_alloc *fa;
142 fa = kmalloc(sizeof(*fa), GFP_ATOMIC);
147 data = sm->alloc(sm);
153 chunk_init(&fa->chunk, data, PAGE_SIZE/sizeof(unsigned long), sm->slot_size);
/*
 * free_fixed_alloc - tear down @fa: release the chunk's bookkeeping,
 * then hand the backing page back to the slot manager. The fa node
 * itself is presumably kfree()d on an elided line — TODO confirm.
 */
158 static void free_fixed_alloc(struct slot_manager *sm, struct fixed_alloc *fa)
160 chunk_uninit(&fa->chunk);
161 sm->free(sm, fa->chunk.data);
/*
 * alloc_insn_slot - allocate one instruction slot from @sm.
 *
 * Walks the RCU-protected list of per-page allocators and takes the
 * first free block (the return-on-success path is elided here). If no
 * page has room, a new fixed_alloc page is created, published on the
 * list head, and the slot is carved from it. Returns the slot pointer;
 * presumably NULL on allocation failure (elided check after
 * create_fixed_alloc()) — TODO confirm.
 *
 * NOTE(review): the 3-argument hlist_for_each_entry_rcu(..., pos, ...)
 * is the pre-3.9 kernel API; this file targets older kernels.
 */
166 void *alloc_insn_slot(struct slot_manager *sm)
169 struct fixed_alloc *fa;
170 struct hlist_node *pos;
172 hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
173 free_slot = chunk_allocate(&fa->chunk, sm->slot_size);
/* No existing page had a free block: grow by one page. */
178 fa = create_fixed_alloc(sm);
182 INIT_HLIST_NODE(&fa->hlist);
/* Publish the new page for concurrent RCU readers before using it. */
183 hlist_add_head_rcu(&fa->hlist, &sm->page_list);
/* Fresh page: this allocation cannot fail for lack of space. */
185 return chunk_allocate(&fa->chunk, sm->slot_size);
187 EXPORT_SYMBOL_GPL(alloc_insn_slot);
/*
 * free_insn_slot - return @slot (obtained from alloc_insn_slot()) to @sm.
 *
 * Finds the page-sized chunk that owns @slot, pushes the block back on
 * its free list, and — when the whole page becomes unused — unlinks the
 * page and releases it. Panics if @slot belongs to no known page, which
 * indicates caller corruption.
 *
 * NOTE(review): hlist_del_rcu() is followed immediately by
 * free_fixed_alloc() with no visible grace period (synchronize_rcu/
 * call_rcu) between unlink and free; concurrent RCU readers in
 * alloc_insn_slot() could still hold a reference — confirm how the
 * elided lines (or the callers' locking) make this safe.
 */
189 void free_insn_slot(struct slot_manager *sm, void *slot)
191 struct fixed_alloc *fa;
192 struct hlist_node *pos;
194 hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
/* Skip pages that do not contain @slot. */
195 if (!chunk_check_ptr(&fa->chunk, slot, PAGE_SIZE))
198 chunk_deallocate(&fa->chunk, slot, sm->slot_size);
/* Last slot on the page returned: reclaim the whole page. */
200 if (chunk_free(&fa->chunk)) {
201 hlist_del_rcu(&fa->hlist);
202 free_fixed_alloc(sm, fa);
/* Reached only if @slot was not found in any page. */
208 panic("free_insn_slot: slot=%p is not data base\n", slot);
210 EXPORT_SYMBOL_GPL(free_insn_slot);