Update from product codes
[profile/mobile/platform/kernel/linux-3.10-sc7730.git] / drivers / power / load_analyzer_workqueue.c
1 /* drivers/power/load_analyzer_workqueue.c */
2
3
4 static unsigned long long calc_delta_time_work(unsigned int cpu, unsigned int index)
5 {
6         unsigned long long run_start_time, run_end_time;
7
8
9         run_start_time = cpu_work_history_view[index][cpu].start_time;
10         if (run_start_time < section_start_time)
11                 run_start_time = section_start_time;
12
13         run_end_time = cpu_work_history_view[index][cpu].end_time;
14
15         if (run_end_time < section_start_time)
16                 return 0;
17
18         if (run_end_time > section_end_time)
19                 run_end_time = section_end_time;
20
21         return  run_end_time - run_start_time;
22 }
23
24
25 static void add_work_to_list(unsigned int cpu, unsigned int index)
26 {
27         struct cpu_work_runtime_tag *new_work;
28
29         new_work
30                 = kmalloc(sizeof(struct cpu_work_runtime_tag), GFP_KERNEL);
31
32         new_work->occup_time =  calc_delta_time_work(cpu, index);
33
34         new_work->cnt = 1;
35         new_work->task = cpu_work_history_view[index][cpu].task;
36         new_work->pid = cpu_work_history_view[index][cpu].pid;
37
38         new_work->work = cpu_work_history_view[index][cpu].work;
39         new_work->func = cpu_work_history_view[index][cpu].func;
40         pr_info("%s %d\n", __FUNCTION__, __LINE__);
41
42         if (new_work->occup_time != 0) {
43                 INIT_LIST_HEAD(&new_work->list);
44                 list_add_tail(&new_work->list, &work_headlist);
45         } else
46                 kfree(new_work);
47
48         return;
49 }
50
51
52 static void del_work_list(void)
53 {
54         struct cpu_work_runtime_tag *curr;
55         struct list_head *p, *n;
56
57         list_for_each_prev_safe(p, n, &work_headlist) {
58                 curr = list_entry(p, struct cpu_work_runtime_tag, list);
59                 kfree(curr);
60         }
61         work_headlist.prev = NULL;
62         work_headlist.next = NULL;
63
64 }
65
66
67 static int comp_list_occuptime(struct list_head *list1, struct list_head *list2)
68 {
69         struct cpu_work_runtime_tag *list1_struct, *list2_struct;
70
71         int ret = 0;
72          list1_struct = list_entry(list1, struct cpu_work_runtime_tag, list);
73          list2_struct = list_entry(list2, struct cpu_work_runtime_tag, list);
74
75         if (list1_struct->occup_time > list2_struct->occup_time)
76                 ret = 1;
77         else if (list1_struct->occup_time < list2_struct->occup_time)
78                 ret = -1;
79         else
80                 ret  = 0;
81
82         return ret;
83 }
84
/*
 * view_workfn_list - sort work_headlist by occup_time (descending) and
 * append one formatted line per work to @buf starting at offset @ret.
 * Returns the new buffer offset.
 *
 * NOTE(review): the sort swaps adjacent nodes (swap_process_list, defined
 * elsewhere) while list_for_each() is walking the list; the outer pass
 * count makes this a bubble sort. Statement order is load-bearing here.
 */
static unsigned int view_workfn_list(char *buf, unsigned int buf_size, unsigned int ret)
{
	struct list_head *p;
	struct cpu_work_runtime_tag *curr;
	unsigned int cnt = 0, list_num = 0;

	/* Count entries to know how many bubble-sort passes to run. */
	list_for_each(p, &work_headlist) {
		curr = list_entry(p, struct cpu_work_runtime_tag, list);
		list_num++;
	}

	/* One full pass per entry; swap neighbours that are out of order. */
	for (cnt = 0; cnt < list_num; cnt++) {
		list_for_each(p, &work_headlist) {
		curr = list_entry(p, struct cpu_work_runtime_tag, list);
			if (p->next != &work_headlist) {
				if (comp_list_occuptime(p, p->next) == -1)
					swap_process_list(p, p->next);
			}
		}
	}

	/* Emit "[rank] func(count) task occup_time" per node, bounds-checked. */
	cnt = 1;
	list_for_each(p, &work_headlist) {
		curr = list_entry(p, struct cpu_work_runtime_tag, list);
		if (ret < buf_size - 1) {
			ret +=  snprintf(buf + ret, buf_size - ret,
			"[%2d] %32pf(%4d) %16s %11lld[ns]\n"
			, cnt++, curr->func, curr->cnt ,curr->task->comm ,curr->occup_time);
		}
	}

	return ret;
}
118
119 static struct cpu_work_runtime_tag *search_exist_workfn(work_func_t func)
120 {
121         struct list_head *p;
122         struct cpu_work_runtime_tag *curr;
123
124         list_for_each(p, &work_headlist) {
125                 curr = list_entry(p, struct cpu_work_runtime_tag, list);
126                 if (curr->func == func)
127                         return curr;
128         }
129         return NULL;
130 }
131
/*
 * clc_work_run_time - rebuild work_headlist with per-work-function
 * runtime totals for @cpu over the load-history section delimited by
 * indices @start_cnt..@end_cnt of cpu_load_freq_history_view.
 *
 * Side effects: sets the globals section_start_time, section_end_time
 * and total_time, and replaces the contents of work_headlist.
 */
static void clc_work_run_time(unsigned int cpu
			, unsigned int start_cnt, unsigned int end_cnt)
{
	unsigned  int cnt = 0,  start_array_num;
	unsigned int end_array_num, end_array_num_plus1;
	unsigned int i, loop_cnt;
	struct cpu_work_runtime_tag *work_runtime_data;
	unsigned long long t1, t2;

	/* First work-history slot inside the section (ring index + 1). */
	start_array_num
	    = (cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu] + 1)
		% cpu_work_history_num;

	section_start_time
		= cpu_load_freq_history_view[start_cnt].time_stamp;
	section_end_time
		= cpu_load_freq_history_view[end_cnt].time_stamp;

	end_array_num
	= cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu];
	end_array_num_plus1
	= (cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu] + 1)
			% cpu_work_history_num;

	/* If slot end+1 holds an older end_time it was never (re)written;
	 * fall back to the last valid slot. */
	t1 = cpu_work_history_view[end_array_num][cpu].end_time;
	t2 = cpu_work_history_view[end_array_num_plus1][cpu].end_time;

	if (t2 < t1)
		end_array_num_plus1 = end_array_num;

	total_time = section_end_time - section_start_time;

	/* Drop the result list of a previous run before re-populating.
	 * A NULL next pointer means del_work_list() already ran. */
	if (work_headlist.next != NULL)
		del_work_list();

	INIT_LIST_HEAD(&work_headlist);

	/* Number of ring slots in the section, accounting for wrap-around. */
	if (end_array_num_plus1 >= start_array_num)
		loop_cnt = end_array_num_plus1-start_array_num + 1;
	else
		loop_cnt = end_array_num_plus1
				+ cpu_work_history_num - start_array_num + 1;

	/* Walk the section; aggregate runtime per work function. */
	for (i = start_array_num, cnt = 0; cnt < loop_cnt; cnt++, i++) {
		if (i >= cpu_work_history_num)
			i = 0;

		work_runtime_data = search_exist_workfn(cpu_work_history_view[i][cpu].func);
		if (work_runtime_data == NULL)
			add_work_to_list(cpu, i);
		else {
			work_runtime_data->occup_time
				+= calc_delta_time_work(cpu, i);
			work_runtime_data->cnt++;
		}
	}

}
190
/*
 * work_time_list_view - append a detailed, slot-by-slot dump of the work
 * history for @cpu over load-history section @start_cnt..@end_cnt to
 * @buf at offset @ret. Returns the new buffer offset.
 *
 * NOTE(review): the header lines compute the slot range with
 * "% cpu_task_history_num" although every other index here uses
 * cpu_work_history_num — looks like a copy-paste from the task-history
 * viewer; confirm before changing, it only affects the printed indices.
 */
static unsigned int  work_time_list_view(unsigned int cpu,
			unsigned int start_cnt, unsigned int end_cnt
			, char *buf, unsigned int buf_size, unsigned int ret)
{
	unsigned  int i = 0, start_array_num, data_line, cnt=0;
	unsigned int end_array_num, start_array_num_for_time;

	start_array_num_for_time
	= cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu];
	start_array_num
	= (cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu]+1)
			% cpu_work_history_num;
	end_array_num
		= cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu];

	/* Assumes clc_work_run_time() already set section_{start,end}_time. */
	total_time = section_end_time - section_start_time;

	/* Section header: single-interval vs multi-interval formats. */
	if (end_cnt == start_cnt+1) {
		ret +=  snprintf(buf + ret, buf_size - ret,
			"[%d] TOTAL SECTION TIME = %lld[ns]\n[%5d]~[%5d]/(%lld ~ %lld)\n\n"
			, end_cnt, total_time
			, (cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu]\
							+ 1)	% cpu_task_history_num
			, (cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu]\
							+ 1)	% cpu_task_history_num
			, cpu_load_freq_history_view[start_cnt].time_stamp
			, cpu_load_freq_history_view[end_cnt].time_stamp);
	} else {
		ret +=  snprintf(buf + ret, buf_size - ret,
			"[%d~%d] TOTAL SECTION TIME = %lld[ns]\n[%5d]~[%5d]/(%lld ~ %lld)\n\n"
			, get_index(start_cnt, cpu_work_history_num, 1)
			, end_cnt, total_time
			, (cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu]\
							+ 1)	% cpu_task_history_num
			, (cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu]\
							+ 1)	% cpu_task_history_num
			, cpu_load_freq_history_view[start_cnt].time_stamp
			, cpu_load_freq_history_view[end_cnt].time_stamp);
	}

	end_array_num = get_index(end_array_num, cpu_work_history_num, 2);

	/* Number of slots to print, handling ring wrap-around. */
	if (end_array_num >= start_array_num_for_time)
		data_line = end_array_num -start_array_num_for_time + 1;
	else {
		data_line = (end_array_num + cpu_work_history_num) \
				-start_array_num_for_time + 1;
	}
	cnt = start_array_num_for_time;

	for (i = 0; i < data_line; i++) {
		u64 delta_time;
		char *p_name;
		char task_name[80] = {0,};
		unsigned int pid = 0;

		if (cnt > cpu_work_history_num-1)
			cnt = 0;

		delta_time = cpu_work_history_view[cnt][cpu].end_time \
				-cpu_work_history_view[cnt][cpu].start_time;

		/* Resolve the task name: live task if the pid still matches,
		 * otherwise look it up in the killed-task table. */
		pid = cpu_work_history_view[cnt][cpu].pid;
		if (cpu_work_history_view[cnt][cpu].task->pid == pid) {
			p_name = cpu_work_history_view[cnt][cpu].task->comm;

		} else {
			if(search_killed_task(pid, task_name) < 0) {
				snprintf(task_name, sizeof(task_name) \
							, "NOT found task");
			}
			p_name = task_name;
		}

		if (ret >= buf_size)
			break;

		ret +=  snprintf(buf + ret, buf_size - ret,
			"[%d] %32pf @ %24pf  [%16s]   %lld ~ %lld %10lld[ns] \n", cnt
			, cpu_work_history_view[cnt][cpu].func
			, cpu_work_history_view[cnt][cpu].work
			, p_name
			, cpu_work_history_view[cnt][cpu].start_time
			, cpu_work_history_view[cnt][cpu].end_time
			, delta_time );
		cnt++;
	}

	return ret;

}
282
283
284 u64  get_load_analyzer_time(void)
285 {
286         return cpu_clock(UINT_MAX);
287 }
288
289 void __slp_store_work_history(struct work_struct *work, work_func_t func
290                                                 , u64 start_time, u64 end_time)
291 {
292         unsigned int cnt, cpu;
293         struct task_struct *task;
294
295         if (cpu_work_history_onoff == 0)
296                 return ;
297
298         cpu = raw_smp_processor_id();
299         task = current;
300
301         if (++cpu_work_history_cnt[cpu] >= cpu_work_history_num)
302                 cpu_work_history_cnt[cpu] = 0;
303         cnt = cpu_work_history_cnt[cpu];
304
305         cpu_work_history[cnt][cpu].start_time = start_time;
306         cpu_work_history[cnt][cpu].end_time = end_time;
307 //      cpu_work_history[cnt][cpu].occup_time = end_time - start_time;
308         cpu_work_history[cnt][cpu].task = task;
309         cpu_work_history[cnt][cpu].pid = task->pid;
310         cpu_work_history[cnt][cpu].work = work;
311         cpu_work_history[cnt][cpu].func = func;
312
313
314 }
315
316
/*
 * check_work_valid_range - validate that the work history for @cpu still
 * covers the load-history section @start_cnt..@end_cnt.
 *
 * Returns 0 when the section is usable, WRONG_CPU_NUM for an invalid
 * cpu, WRONG_TIME_STAMP for inconsistent section timestamps, or
 * OVERFLOW_ERROR when the ring buffer wrapped past the section start.
 *
 * NOTE(review): the end-of-history scan below indexes
 * cpu_task_history_view/cpu_task_history_num while using a
 * work_history_cnt index — possibly intentional sharing, possibly a
 * copy-paste from the task-history checker; confirm before changing.
 */
int check_work_valid_range(unsigned int cpu, unsigned int start_cnt
						, unsigned int end_cnt)
{
	int ret = 0;
	unsigned long long t1, t2;
	unsigned int end_sched_cnt = 0, end_sched_cnt_margin;
	unsigned int load_cnt, last_load_cnt, overflow = 0;
	unsigned int cnt, search_cnt;
	unsigned int upset = 0;

	unsigned int i;

	if (cpu >= CPU_NUM)
		return WRONG_CPU_NUM;

	/* The section must span a strictly positive, recorded interval. */
	t1 = cpu_load_freq_history_view[start_cnt].time_stamp;
	t2 = cpu_load_freq_history_view[end_cnt].time_stamp;

	if ((t2 <= t1) || (t1 == 0) || (t2 == 0)) {
		pr_info("[time error] t1=%lld t2=%lld\n", t1, t2);
		return WRONG_TIME_STAMP;
	}

	last_load_cnt = cpu_load_freq_history_view_cnt;

	/* Find where recorded history ends: the first slot whose timestamp
	 * goes backwards marks the ring's write frontier. */
	cnt = cpu_load_freq_history_view[last_load_cnt].work_history_cnt[cpu];
	t1 = cpu_task_history_view[cnt][cpu].time;
	search_cnt = cnt;
	for (i = 0;  i < cpu_task_history_num; i++) {
		search_cnt = get_index(search_cnt, cpu_task_history_num, 1);
		t2 = cpu_task_history_view[search_cnt][cpu].time;

		if (t2 < t1) {
			end_sched_cnt = search_cnt;
			break;
		}

		if (i >= cpu_task_history_num - 1)
			end_sched_cnt = cnt;
	}

	/* Walk backwards from the newest load sample toward @start_cnt,
	 * counting ring wrap-arounds ("upset"); two wraps, or one wrap
	 * that crosses the write frontier, means the section's work
	 * history has been overwritten. */
	load_cnt = last_load_cnt;
	for (i = 0;  i < cpu_load_history_num; i++) {
		unsigned int sched_cnt, sched_before_cnt;
		unsigned int sched_before_cnt_margin;

		sched_cnt
			= cpu_load_freq_history_view[load_cnt]\
						.work_history_cnt[cpu];
		load_cnt = get_index(load_cnt, cpu_load_history_num, -1);

		sched_before_cnt
			= cpu_load_freq_history_view[load_cnt]\
						.work_history_cnt[cpu];

		if (sched_before_cnt > sched_cnt)
			upset++;

		end_sched_cnt_margin
			= get_index(end_sched_cnt, cpu_work_history_num, 1);
		sched_before_cnt_margin
			= get_index(sched_before_cnt, cpu_work_history_num, -1);

		/* "end_sched_cnt -1" is needed
		  *  because of calulating schedule time */
		if ((upset >= 2) || ((upset == 1)
			&& (sched_before_cnt_margin < end_sched_cnt_margin))) {
			overflow = 1;
			pr_err("[LA] overflow cpu=%d upset=%d sched_before_cnt_margin=%d" \
				"end_sched_cnt_margin=%d end_sched_cnt=%d" \
				"sched_before_cnt=%d sched_cnt=%d load_cnt=%d" \
				, cpu , upset, sched_before_cnt_margin
				, end_sched_cnt_margin, end_sched_cnt
				, sched_before_cnt, sched_cnt, load_cnt);
			break;
		}

		if (load_cnt == start_cnt)
			break;
	}

	if (overflow == 0)
		ret = 0;
	else {
		ret = OVERFLOW_ERROR;
		pr_info("[overflow error]\n");
	}
	return ret;
}
406
407 static ssize_t check_work_read(struct file *file,
408         char __user *buffer, size_t count, loff_t *ppos)
409 {
410         static char *buf = NULL;
411         int buf_size = (PAGE_SIZE * 256);
412         unsigned int i, ret = 0, size_for_copy = count;
413         int ret_check_valid;
414         static unsigned int rest_size = 0;
415
416         unsigned int start_cnt = cpu_work_history_show_start_cnt;
417         unsigned int end_cnt = cpu_work_history_show_end_cnt;
418         unsigned long  msec_rem;
419         unsigned long long t;
420         unsigned int cpu;
421
422         if (*ppos < 0 || !count)
423                 return -EINVAL;
424
425         if (*ppos == 0) {
426                 buf = vmalloc(buf_size);
427
428                 if (!buf)
429                         return -ENOMEM;
430
431                 if ((end_cnt) == (start_cnt + 1)) {
432                         ret +=  snprintf(buf + ret, buf_size - ret
433                                 , "=======================================" \
434                                 "========================================" \
435                                 "========================================\n");
436
437                         ret +=  snprintf(buf + ret, buf_size - ret
438                                 , "    TIME       CPU0_F  CPU1_F   CPU_LOCK"
439                                 "    [INDEX]\tCPU0 \tCPU1 \tONLINE \tNR_RUN\n");
440                         ret = show_cpu_load_freq_sub((int)end_cnt+2
441                                                         , 5, buf, buf_size, ret);
442                         ret +=  snprintf(buf + ret, buf_size - ret, "\n\n");
443                 }
444
445                 for (i = 0; i < CPU_NUM ; i++) {
446                         if (cpu_work_history_show_select_cpu != -1)
447                                 if (i != cpu_work_history_show_select_cpu)
448                                         continue;
449
450                         cpu = i;
451                         ret_check_valid = check_work_valid_range(cpu, start_cnt, end_cnt);
452                         if (ret_check_valid < 0)        {
453                                 ret +=  snprintf(buf + ret, buf_size - ret
454                                         , "[ERROR] cpu[%d] Invalid range !!! err=%d\n"
455                                         , cpu, ret_check_valid);
456                                 pr_info("[ERROR] cpu[%d] Invalid range !!! err=%d\n"
457                                         , cpu, ret_check_valid);
458                                 continue;
459                         }
460
461                         clc_work_run_time(i, start_cnt, end_cnt);
462
463                         t = total_time;
464                         msec_rem = do_div(t, 1000000);
465
466                         if (end_cnt == start_cnt+1) {
467                                 ret +=  snprintf(buf + ret, buf_size - ret,
468                                 "[%d] TOTAL SECTION TIME = %ld.%ld[ms]\n\n"
469                                 , end_cnt       , (unsigned long)t, msec_rem);
470                         } else {
471                                 ret +=  snprintf(buf + ret, buf_size - ret,
472                                 "[%d~%d] TOTAL SECTION TIME = %ld.%ld[ms]\n\n"
473                                 , start_cnt+1, end_cnt, (unsigned long)t
474                                 , msec_rem);
475                         }
476
477                         ret += snprintf(buf + ret, buf_size - ret,
478                                 "######################################"
479                                 " CPU %d ###############################\n", i);
480
481                         if (cpu_work_history_show_select_cpu == -1)
482                                 ret = view_workfn_list(buf, buf_size, ret);
483                         else if (i == cpu_work_history_show_select_cpu)
484                                 ret = view_workfn_list(buf, buf_size, ret);
485
486                         if (ret < buf_size - 1)
487                                 ret +=  snprintf(buf + ret, buf_size - ret, "\n\n");
488                 }
489
490                 if (ret <= count) {
491                         size_for_copy = ret;
492                         rest_size = 0;
493                 } else {
494                         size_for_copy = count;
495                         rest_size = ret -size_for_copy;
496                 }
497         }else {
498                 if (rest_size <= count) {
499                         size_for_copy = rest_size;
500                         rest_size = 0;
501                 } else {
502                         size_for_copy = count;
503                         rest_size -= size_for_copy;
504                 }
505         }
506
507         if (size_for_copy >  0) {
508                 int offset = (int) *ppos;
509                 if (copy_to_user(buffer, buf + offset , size_for_copy)) {
510                         vfree(buf);
511                         return -EFAULT;
512                 }
513                 *ppos += size_for_copy;
514         } else
515                 vfree(buf);
516
517         return size_for_copy;
518 }
519
520 static ssize_t check_work_write(struct file *file,
521                                       const char __user *user_buf, size_t count,
522                                       loff_t *ppos)
523 {
524         set_cpu_load_freq_history_array_range(user_buf);
525
526         cpu_work_history_show_select_cpu = cpu_task_history_show_select_cpu;
527         cpu_work_history_show_start_cnt= cpu_task_history_show_start_cnt;
528         cpu_work_history_show_end_cnt = cpu_task_history_show_end_cnt;
529
530         return count;
531 }
532
533
534 static int check_work_detail_sub(char *buf, int buf_size)
535 {
536         int ret = 0, i = 0, ret_check_valid = 0;
537         unsigned int start_cnt = cpu_work_history_show_start_cnt;
538         unsigned int end_cnt = cpu_work_history_show_end_cnt;
539         unsigned int cpu;
540
541         for (i = 0; i < CPU_NUM; i++) {
542
543                 if (cpu_work_history_show_select_cpu != -1)
544                         if (i != cpu_work_history_show_select_cpu)
545                                 continue;
546                 cpu = i;
547                 ret_check_valid = check_work_valid_range(cpu, start_cnt, end_cnt);
548                 if (ret_check_valid < 0)        {
549                         ret +=  snprintf(buf + ret, buf_size - ret
550                                 , "[ERROR] cpu[%d] Invalid range !!! err=%d\n"
551                                 , cpu, ret_check_valid);
552                         pr_info("[ERROR] cpu[%d] Invalid range !!! err=%d\n"
553                                 , cpu, ret_check_valid);
554                         continue;
555                 }
556
557                 ret += snprintf(buf + ret, buf_size - ret,
558                         "###########################################"
559                         "################ CPU %d ######################"
560                         "##########################################\n", i);
561
562                 ret = work_time_list_view(i, start_cnt, end_cnt, buf, buf_size, ret);
563
564                 ret += snprintf(buf + ret, buf_size - ret ,"\n\n");
565         }
566
567         return ret;
568 }
569
570 static ssize_t check_work_detail(struct file *file,
571         char __user *buffer, size_t count, loff_t *ppos)
572 {
573         unsigned int size_for_copy;
574
575         size_for_copy = wrapper_for_debug_fs(buffer, count, ppos,\
576                                                                                                         check_work_detail_sub);
577
578         return size_for_copy;
579 }
580
581 static const struct file_operations check_work_fops = {
582         .owner = THIS_MODULE,
583         .read = check_work_read,
584         .write =check_work_write,
585 };
586
587 static const struct file_operations check_work_detail_fops = {
588         .owner = THIS_MODULE,
589         .read = check_work_detail,
590 };
591
592
593 void debugfs_workqueue(struct dentry *d)
594 {
595         if (!debugfs_create_file("check_work", 0600
596                 , d, NULL,&check_work_fops))   \
597                         pr_err("%s : debugfs_create_file, error\n", "check_work");
598         if (!debugfs_create_file("check_work_detail", 0600
599                 , d, NULL,&check_work_detail_fops))   \
600                         pr_err("%s : debugfs_create_file, error\n", "check_work_detail");
601 }
602
603