[REFACTOR] move and rename /un/register_us_page_probe()
[kernel/swap-modules.git] / driver / ec.c
////////////////////////////////////////////////////////////////////////////////////
//
//      FILE:           ec.c
//
//      DESCRIPTION:
//      This file is C source for the SWAP driver.
//
//      SEE ALSO:       ec.h
//      AUTHOR:         L.Komkov, A.Gerenkov
//      COMPANY NAME:   Samsung Research Center in Moscow
//      DEPT NAME:      Advanced Software Group
//      CREATED:        2008.02.15
//      VERSION:        1.0
//      REVISION DATE:  2008.12.03
//
////////////////////////////////////////////////////////////////////////////////////

#include "module.h"
#include "ec.h"

////////////////////////////////////////////////////////////////////////////////////////////

ec_info_t ec_info = {
        .ec_state = EC_STATE_IDLE,
        .m_nMode = 0L,
        .buffer_size = EC_BUFFER_SIZE_DEFAULT,
        .ignored_events_count = 0,
};

////////////////////////////////////////////////////////////////////////////////////////////

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
/* SPIN_LOCK_UNLOCKED is gone on newer kernels; use the static initializer instead */
DEFINE_SPINLOCK(ec_spinlock);   // protects 'ec_info'
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
spinlock_t ec_spinlock = SPIN_LOCK_UNLOCKED;    // protects 'ec_info'
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
ec_probe_info_t ec_probe_info = {
        .probe_id = -1,
        .probe_selected = 0,
        .jprobe_active = 0,
        .retprobe_active = 0,
        .address = NULL,
};

ec_state_t GetECState(void) { return ec_info.ec_state; }

void reset_ec_info_nolock(void)
{
        ec_info.trace_size = 0;
        ec_info.first = 0;
        ec_info.after_last = 0;
        ec_info.ignored_events_count = 0;
        ec_info.saved_events_count = 0;
        ec_info.discarded_events_count = 0;
        ec_info.collision_count = 0;
        ec_info.lost_events_count = 0;
}

void ResetECInfo(void)
{
        unsigned long spinlock_flags = 0L;

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);
        reset_ec_info_nolock();
        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
}

void CleanECInfo(void)
{
        unsigned long spinlock_flags = 0L;

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);
        ec_info.buffer_effect = 0;
        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);

        ResetECInfo();
}

int IsECMode(unsigned long nMask)
{
        return ((ec_info.m_nMode & nMask) != 0);
}

int IsContinuousRetrieval(void)
{
        return IsECMode(MODEMASK_CONTINUOUS_RETRIEVAL);
}

int SetECMode(unsigned long nECMode)
{
        unsigned long spinlock_flags = 0L;

        if ((nECMode & MODEMASK_CONTINUOUS_RETRIEVAL) != 0) {
                if (EnableContinuousRetrieval() == -1) {
                        EPRINTF("Cannot enable continuous retrieval!");
                        return -1;
                }
        } else {
                if (DisableContinuousRetrieval() == -1) {
                        EPRINTF("Cannot disable continuous retrieval!");
                        return -1;
                }
        }

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);
        ec_info.m_nMode = nECMode;
        reset_ec_info_nolock();
        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);

        return 0;
}

unsigned long GetECMode(void)
{
        unsigned long ec_mode;
        unsigned long spinlock_flags = 0L;

        spin_lock_irqsave(&ec_spinlock, spinlock_flags);
        ec_mode = ec_info.m_nMode;
        spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);

        return ec_mode;
}

int is_java_inst_enabled(void)
{
        return !!(GetECMode() & MODEMASK_JAVA_INST);
}

#if defined(__DEBUG)
static UNUSED char * ec_state_name (ec_state_t ec_state)
{
        static char *ec_state_names[EC_STATE_TAG_COUNT] = { "IDLE", "ATTACHED", "ACTIVE", "STOPPED" };

        /* use the argument rather than the global ec_info.ec_state so callers can name any state value */
        if (((unsigned) ec_state) < EC_STATE_TAG_COUNT)
        {
                return ec_state_names[ec_state];
        }
        else
        {
                return "<unknown>";
        }
}
#endif /* defined(__DEBUG) */


/*
    On user request the user-space EC may change state in the following order
    (see the illustrative call sequence after ec_user_stop() below):
        IDLE -> ATTACHED (on "attach")
        IDLE | ATTACHED -> ACTIVE (on "activate")
        ATTACHED | ACTIVE | STOPPED -> IDLE (on "stop"/"detach")
*/
int ec_user_attach (void)
{
        unsigned long spinlock_flags;
        int result;

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);       // make other CPUs wait
        if (EC_STATE_IDLE == ec_info.ec_state)
        {
                int tmp;
                struct timeval tv;
                struct cond *p_cond;
                struct event_tmpl *p_tmpl;

                ec_info.ec_state = EC_STATE_ATTACHED;

                /* save 'start' time */
                do_gettimeofday(&tv);
                memcpy(&last_attach_time, &tv, sizeof(struct timeval));

                /* unpause if paused */
                paused = 0;

                /* if there is at least one start condition in the list,
                   we start out paused */
                list_for_each_entry(p_cond, &cond_list.list, list) {
                        p_tmpl = &p_cond->tmpl;
                        if (p_tmpl->type == ET_TYPE_START_COND) {
                                paused = 1;
                                break;
                        }
                }

                spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs

                // first of all, put an event carrying the event format
                tmp = event_mask;
                event_mask = 0;
                pack_event_info(EVENT_FMT_PROBE_ID, RECORD_ENTRY, "x", tmp);
                event_mask = tmp;

                result = inst_usr_space_proc();         // instrument user space process
                if (result == 0)
                        result = set_kernel_probes();
                // FIXME: SAFETY CHECK
                if (result)
                {               // return to safe state
                        unset_kernel_probes();

                        spin_lock_irqsave (&ec_spinlock, spinlock_flags);       // make other CPUs wait
                        ec_info.ec_state = EC_STATE_IDLE;
                        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs
                }
                // FIXME: SAFETY CHECK

                notify_user (EVENT_EC_STATE_CHANGE);
        }
        else
        {
                spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs
                result = -EINVAL;
        }

        return result;
}

int ec_user_activate (void)
{
        unsigned long spinlock_flags;
        int result;

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);       // make other CPUs wait
        if (EC_STATE_IDLE == ec_info.ec_state)
        {
                int tmp;
                ec_info.ec_state = EC_STATE_ACTIVE;
                spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs

                // first of all, put an event carrying the event format
                tmp = event_mask;
                event_mask = 0;
                pack_event_info(EVENT_FMT_PROBE_ID, RECORD_ENTRY, "x", tmp);
                event_mask = tmp;

                result = inst_usr_space_proc();         // instrument user space process
                if (result == 0)
                        result = set_kernel_probes();

                // FIXME: SAFETY CHECK
                if (result)
                {               // return to safe state
                        unset_kernel_probes();

                        spin_lock_irqsave (&ec_spinlock, spinlock_flags);       // make other CPUs wait
                        ec_info.ec_state = EC_STATE_IDLE;
                        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs
                }
                // FIXME: SAFETY CHECK

                notify_user (EVENT_EC_STATE_CHANGE);
        }
        else if (EC_STATE_ATTACHED == ec_info.ec_state)
        {
                ec_info.ec_state = EC_STATE_ACTIVE;
                result = 0;
                spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs

                notify_user (EVENT_EC_STATE_CHANGE);
        }
        else
        {
                spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs
                result = -EINVAL;
        }

        return result;
}

int ec_user_stop (void)
{
        unsigned long spinlock_flags;
        int result = 0, ret = 0;

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);       // make other CPUs wait
        if (EC_STATE_ATTACHED == ec_info.ec_state || EC_STATE_ACTIVE == ec_info.ec_state || EC_STATE_STOPPED == ec_info.ec_state)
        {
                ec_info.ec_state = EC_STATE_IDLE;
                spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs

                ret = deinst_usr_space_proc ();
                result = unset_kernel_probes();
                if (result == 0)
                        result = ret;

                notify_user (EVENT_EC_STATE_CHANGE);
        }
        else
        {
                spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs
                result = -EINVAL;
        }

        return result;
}
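
/*
 * Illustrative sketch only (not built, not part of the driver): the expected
 * order in which the user-requested transitions above are driven. The helper
 * name ec_user_session_example() is hypothetical; ec_user_attach(),
 * ec_user_activate() and ec_user_stop() are the functions defined above.
 */
#if 0
static int ec_user_session_example(void)
{
        int ret;

        ret = ec_user_attach();         /* IDLE -> ATTACHED */
        if (ret)
                return ret;             /* -EINVAL unless the collector was IDLE */

        ret = ec_user_activate();       /* ATTACHED -> ACTIVE */
        if (ret) {
                ec_user_stop();         /* roll back to IDLE */
                return ret;
        }

        /* ... events are collected while ACTIVE ... */

        return ec_user_stop();          /* ATTACHED | ACTIVE | STOPPED -> IDLE */
}
#endif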

/*
    Kernel space EC may change state in the following order (see the sketch
    after ec_kernel_stop() below):
        ATTACHED -> ACTIVE (when a start condition is satisfied)
        ACTIVE -> STOPPED (when a stop condition is satisfied)
*/
int ec_kernel_activate (void)
{
        unsigned long spinlock_flags;
        int result;

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);       // make other CPUs wait
        if (EC_STATE_ATTACHED == ec_info.ec_state)
        {
                ec_info.ec_state = EC_STATE_ACTIVE;
                result = 0;
        }
        else
        {
                result = -EINVAL;
        }
        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs

        notify_user (EVENT_EC_STATE_CHANGE);

        return result;
}

int ec_kernel_stop (void)
{
        unsigned long spinlock_flags;
        int result;

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);       // make other CPUs wait
        if (EC_STATE_ACTIVE == ec_info.ec_state)
        {
                ec_info.ec_state = EC_STATE_STOPPED;
                result = 0;
        }
        else
        {
                result = -EINVAL;
        }
        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs

        notify_user (EVENT_EC_STATE_CHANGE);

        return result;
}
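
/*
 * Illustrative sketch only (not built): how a condition-matching path could
 * drive the kernel-side transitions above. on_start_condition() and
 * on_stop_condition() are hypothetical hooks; ec_kernel_activate(),
 * ec_kernel_stop(), GetECState() and EPRINTF() come from this driver.
 */
#if 0
static void on_start_condition(void)
{
        /* only succeeds while the collector is ATTACHED */
        if (ec_kernel_activate() == -EINVAL)
                EPRINTF("start condition ignored in state %d", (int) GetECState());
}

static void on_stop_condition(void)
{
        /* only succeeds while the collector is ACTIVE */
        if (ec_kernel_stop() == -EINVAL)
                EPRINTF("stop condition ignored in state %d", (int) GetECState());
}
#endif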

// Copies EC info to user space.
// Since "copy_to_user" may block, an intermediate copy of ec_info is used here.
int copy_ec_info_to_user_space (ec_info_t * p_user_ec_info)
{
        /*
           WARNING: to avoid stack overflow the following data structure was made
           static. As a result, simultaneous users of this function share it and
           must use additional synchronization to avoid collisions.
         */
        // FIXME: synchronization is necessary here (ec_info_copy must be locked);
        //        see the mutex sketch at the end of this file.
        static ec_info_t ec_info_copy;
        unsigned long spinlock_flags;
        int result;

        // ENTER_CRITICAL_SECTION
        // lock semaphore here

        // ENTER_CRITICAL_SECTION
        spin_lock_irqsave (&ec_spinlock, spinlock_flags);       // make other CPUs wait

        // copy
        memcpy (&ec_info_copy, &ec_info, sizeof (ec_info_copy));

        // LEAVE_CRITICAL_SECTION
        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);  // open our data for other CPUs

        result = copy_to_user ((void __user *) p_user_ec_info, &ec_info_copy, sizeof (ec_info_t));

        // LEAVE_CRITICAL_SECTION
        // unlock semaphore here

        if (result)
        {
                EPRINTF ("copy_to_user(%p,%p)=%d", p_user_ec_info, &ec_info_copy, result);
                result = -EFAULT;
        }
        return result;
}
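
/*
 * Illustrative sketch only (not built): one way to resolve the FIXME above by
 * serializing users of the static intermediate copy with a mutex. The name
 * ec_info_copy_mutex is hypothetical and assumes <linux/mutex.h> is reachable
 * through module.h; everything else mirrors copy_ec_info_to_user_space().
 */
#if 0
static DEFINE_MUTEX(ec_info_copy_mutex);

int copy_ec_info_to_user_space_locked (ec_info_t * p_user_ec_info)
{
        static ec_info_t ec_info_copy;
        unsigned long spinlock_flags;
        int result;

        mutex_lock(&ec_info_copy_mutex);        // ENTER_CRITICAL_SECTION

        spin_lock_irqsave (&ec_spinlock, spinlock_flags);
        memcpy (&ec_info_copy, &ec_info, sizeof (ec_info_copy));
        spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);

        // copy_to_user() may sleep, so it stays outside the spinlock
        // but inside the mutex that guards ec_info_copy
        result = copy_to_user ((void __user *) p_user_ec_info, &ec_info_copy, sizeof (ec_info_t));

        mutex_unlock(&ec_info_copy_mutex);      // LEAVE_CRITICAL_SECTION

        if (result)
        {
                EPRINTF ("copy_to_user(%p,%p)=%d", p_user_ec_info, &ec_info_copy, result);
                result = -EFAULT;
        }
        return result;
}
#endif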