/* drivers/power/load_analyzer_workqueue.c */

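/*
 * NOTE: this file is one piece of the load analyzer.  Shared state such as
 * cpu_work_history[], cpu_work_history_view[], cpu_load_freq_history_view[],
 * work_headlist, section_start_time/section_end_time/total_time and helpers
 * like get_index(), swap_process_list() and search_killed_task() are assumed
 * to be declared in the other load_analyzer sources/headers this file is
 * built with.
 */
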
/* Clamp one work entry's runtime to the currently selected section window. */
static unsigned long long calc_delta_time_work(unsigned int cpu, unsigned int index)
{
	unsigned long long run_start_time, run_end_time;

	run_start_time = cpu_work_history_view[index][cpu].start_time;
	if (run_start_time < section_start_time)
		run_start_time = section_start_time;

	run_end_time = cpu_work_history_view[index][cpu].end_time;
	if (run_end_time < section_start_time)
		return 0;

	if (run_end_time > section_end_time)
		run_end_time = section_end_time;

	return run_end_time - run_start_time;
}

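/*
 * Example (illustrative numbers only): with a section window of
 * 1000..2000 ns, a work entry recorded as 900..1500 ns is clamped to
 * 1000..1500 and contributes 500 ns; an entry that ended before 1000 ns
 * contributes nothing.
 */
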
/* Allocate a per-work accumulator node and append it to work_headlist. */
static void add_work_to_list(unsigned int cpu, unsigned int index)
{
	struct cpu_work_runtime_tag *new_work;

	new_work = kmalloc(sizeof(struct cpu_work_runtime_tag), GFP_KERNEL);
	if (new_work == NULL)
		return;

	new_work->occup_time = calc_delta_time_work(cpu, index);
	new_work->cnt = 1;
	new_work->task = cpu_work_history_view[index][cpu].task;
	new_work->pid = cpu_work_history_view[index][cpu].pid;
	new_work->work = cpu_work_history_view[index][cpu].work;
	new_work->func = cpu_work_history_view[index][cpu].func;
	pr_info("%s %d\n", __func__, __LINE__);

	if (new_work->occup_time != 0) {
		INIT_LIST_HEAD(&new_work->list);
		list_add_tail(&new_work->list, &work_headlist);
	} else
		kfree(new_work);
}

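/*
 * The fields used above imply a node layout along the lines of the sketch
 * below.  The authoritative definition lives in the load analyzer headers;
 * this is only an inferred illustration:
 *
 *	struct cpu_work_runtime_tag {
 *		struct list_head list;
 *		u64 occup_time;
 *		struct task_struct *task;
 *		pid_t pid;
 *		unsigned int cnt;
 *		struct work_struct *work;
 *		work_func_t func;
 *	};
 */
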
static void del_work_list(void)
{
	struct cpu_work_runtime_tag *curr;
	struct list_head *p, *n;

	list_for_each_prev_safe(p, n, &work_headlist) {
		curr = list_entry(p, struct cpu_work_runtime_tag, list);
		kfree(curr);
	}
	work_headlist.prev = NULL;
	work_headlist.next = NULL;
}

static int comp_list_occuptime(struct list_head *list1, struct list_head *list2)
{
	struct cpu_work_runtime_tag *list1_struct, *list2_struct;

	list1_struct = list_entry(list1, struct cpu_work_runtime_tag, list);
	list2_struct = list_entry(list2, struct cpu_work_runtime_tag, list);

	if (list1_struct->occup_time > list2_struct->occup_time)
		return 1;
	else if (list1_struct->occup_time < list2_struct->occup_time)
		return -1;
	return 0;
}

static unsigned int view_workfn_list(char *buf, unsigned int buf_size, unsigned int ret)
{
	struct list_head *p;
	struct cpu_work_runtime_tag *curr;
	unsigned int cnt = 0, list_num = 0;

	list_for_each(p, &work_headlist) {
		curr = list_entry(p, struct cpu_work_runtime_tag, list);
		list_num++;
	}

	/* Order the list by descending occup_time. */
	for (cnt = 0; cnt < list_num; cnt++) {
		list_for_each(p, &work_headlist) {
			curr = list_entry(p, struct cpu_work_runtime_tag, list);
			if (p->next != &work_headlist) {
				if (comp_list_occuptime(p, p->next) == -1)
					swap_process_list(p, p->next);
			}
		}
	}

	cnt = 0;
	list_for_each(p, &work_headlist) {
		curr = list_entry(p, struct cpu_work_runtime_tag, list);
		if (ret < buf_size - 1)
			ret += snprintf(buf + ret, buf_size - ret,
				"[%2d] %32pf(%4d) %16s %11lld[ns]\n",
				cnt++, curr->func, curr->cnt,
				curr->task->comm, curr->occup_time);
	}

	return ret;
}

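/*
 * The double loop above is a simple O(n^2) bubble sort over the linked
 * list: adjacent nodes are compared with comp_list_occuptime() and swapped
 * with swap_process_list() (a list helper shared with the task-history
 * view, defined elsewhere in the load analyzer) until the entries are in
 * descending occup_time order.
 */
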
static struct cpu_work_runtime_tag *search_exist_workfn(work_func_t func)
{
	struct list_head *p;
	struct cpu_work_runtime_tag *curr;

	list_for_each(p, &work_headlist) {
		curr = list_entry(p, struct cpu_work_runtime_tag, list);
		if (curr->func == func)
			return curr;
	}
	return NULL;
}

static void clc_work_run_time(unsigned int cpu,
		unsigned int start_cnt, unsigned int end_cnt)
{
	unsigned int cnt = 0, start_array_num;
	unsigned int end_array_num, end_array_num_plus1;
	unsigned int i, loop_cnt;
	struct cpu_work_runtime_tag *work_runtime_data;
	unsigned long long t1, t2;

	start_array_num
		= (cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu] + 1)
			% cpu_work_history_num;

	section_start_time
		= cpu_load_freq_history_view[start_cnt].time_stamp;
	section_end_time
		= cpu_load_freq_history_view[end_cnt].time_stamp;

	end_array_num
		= cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu];
	end_array_num_plus1
		= (cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu] + 1)
			% cpu_work_history_num;

	/* Only step past the end slot if the next slot already holds a newer entry. */
	t1 = cpu_work_history_view[end_array_num][cpu].end_time;
	t2 = cpu_work_history_view[end_array_num_plus1][cpu].end_time;
	if (t2 < t1)
		end_array_num_plus1 = end_array_num;

	total_time = section_end_time - section_start_time;

	if (work_headlist.next != NULL)
		del_work_list();
	INIT_LIST_HEAD(&work_headlist);

	if (end_array_num_plus1 >= start_array_num)
		loop_cnt = end_array_num_plus1 - start_array_num + 1;
	else
		loop_cnt = end_array_num_plus1
				+ cpu_work_history_num - start_array_num + 1;

	for (i = start_array_num, cnt = 0; cnt < loop_cnt; cnt++, i++) {
		if (i >= cpu_work_history_num)
			i = 0;

		work_runtime_data = search_exist_workfn(cpu_work_history_view[i][cpu].func);
		if (work_runtime_data == NULL) {
			add_work_to_list(cpu, i);
		} else {
			work_runtime_data->occup_time
				+= calc_delta_time_work(cpu, i);
			work_runtime_data->cnt++;
		}
	}
}

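/*
 * Wrap-around example for the loop count above (illustrative values):
 * with cpu_work_history_num == 1000, start_array_num == 995 and
 * end_array_num_plus1 == 4, the circular buffer wrapped, so
 * loop_cnt = 4 + 1000 - 995 + 1 = 10 entries are accumulated.
 */
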
static unsigned int work_time_list_view(unsigned int cpu,
		unsigned int start_cnt, unsigned int end_cnt,
		char *buf, unsigned int buf_size, unsigned int ret)
{
	unsigned int i = 0, start_array_num, data_line, cnt = 0;
	unsigned int end_array_num, start_array_num_for_time;

	start_array_num_for_time
		= cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu];
	start_array_num
		= (cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu] + 1)
			% cpu_work_history_num;
	end_array_num
		= cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu];

	total_time = section_end_time - section_start_time;

	if (end_cnt == start_cnt + 1) {
		ret += snprintf(buf + ret, buf_size - ret,
			"[%d] TOTAL SECTION TIME = %lld[ns]\n[%5d]~[%5d]/(%lld ~ %lld)\n\n",
			end_cnt, total_time,
			(cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu] + 1)
				% cpu_work_history_num,
			(cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu] + 1)
				% cpu_work_history_num,
			cpu_load_freq_history_view[start_cnt].time_stamp,
			cpu_load_freq_history_view[end_cnt].time_stamp);
	} else {
		ret += snprintf(buf + ret, buf_size - ret,
			"[%d~%d] TOTAL SECTION TIME = %lld[ns]\n[%5d]~[%5d]/(%lld ~ %lld)\n\n",
			get_index(start_cnt, cpu_work_history_num, 1),
			end_cnt, total_time,
			(cpu_load_freq_history_view[start_cnt].work_history_cnt[cpu] + 1)
				% cpu_work_history_num,
			(cpu_load_freq_history_view[end_cnt].work_history_cnt[cpu] + 1)
				% cpu_work_history_num,
			cpu_load_freq_history_view[start_cnt].time_stamp,
			cpu_load_freq_history_view[end_cnt].time_stamp);
	}

	end_array_num = get_index(end_array_num, cpu_work_history_num, 2);

	if (end_array_num >= start_array_num_for_time)
		data_line = end_array_num - start_array_num_for_time + 1;
	else
		data_line = (end_array_num + cpu_work_history_num)
				- start_array_num_for_time + 1;

	cnt = start_array_num_for_time;

	for (i = 0; i < data_line; i++) {
		unsigned long long delta_time;
		char *p_name;
		char task_name[80] = {0,};
		unsigned int pid = 0;

		if (cnt > cpu_work_history_num - 1)
			cnt = 0;

		delta_time = cpu_work_history_view[cnt][cpu].end_time
				- cpu_work_history_view[cnt][cpu].start_time;

		pid = cpu_work_history_view[cnt][cpu].pid;
		if (cpu_work_history_view[cnt][cpu].task->pid == pid) {
			p_name = cpu_work_history_view[cnt][cpu].task->comm;
		} else {
			if (search_killed_task(pid, task_name) < 0)
				snprintf(task_name, sizeof(task_name),
					"NOT found task");
			p_name = task_name;
		}

		if (ret < buf_size - 1)
			ret += snprintf(buf + ret, buf_size - ret,
				"[%d] %32pf @ %24pf [%16s] %lld ~ %lld %10lld[ns]\n", cnt,
				cpu_work_history_view[cnt][cpu].func,
				cpu_work_history_view[cnt][cpu].work,
				p_name,
				cpu_work_history_view[cnt][cpu].start_time,
				cpu_work_history_view[cnt][cpu].end_time,
				delta_time);

		cnt++;
	}

	return ret;
}

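/*
 * Each detail line printed above shows the work function, the work_struct
 * pointer, the task that executed it (falling back to the killed-task
 * table via search_killed_task() when the task_struct has since been
 * reused), and the raw start/end timestamps with their delta in ns.
 */
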
u64 get_load_analyzer_time(void)
{
	return cpu_clock(UINT_MAX);
}

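/*
 * Timestamps throughout this file are the nanosecond values returned by
 * cpu_clock().  __slp_store_work_history() below is the hook the workqueue
 * core is expected to call around each work item; a minimal sketch of such
 * a call site (assumed instrumentation, not part of this file):
 *
 *	u64 start = get_load_analyzer_time();
 *	f(work);
 *	__slp_store_work_history(work, f, start, get_load_analyzer_time());
 */
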
void __slp_store_work_history(struct work_struct *work, work_func_t func,
		u64 start_time, u64 end_time)
{
	unsigned int cnt, cpu;
	struct task_struct *task;

	if (cpu_work_history_onoff == 0)
		return;

	cpu = raw_smp_processor_id();
	task = current;

	if (++cpu_work_history_cnt[cpu] >= cpu_work_history_num)
		cpu_work_history_cnt[cpu] = 0;
	cnt = cpu_work_history_cnt[cpu];

	cpu_work_history[cnt][cpu].start_time = start_time;
	cpu_work_history[cnt][cpu].end_time = end_time;
	/* cpu_work_history[cnt][cpu].occup_time = end_time - start_time; */
	cpu_work_history[cnt][cpu].task = task;
	cpu_work_history[cnt][cpu].pid = task->pid;
	cpu_work_history[cnt][cpu].work = work;
	cpu_work_history[cnt][cpu].func = func;
}

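/*
 * cpu_work_history[][] is indexed [slot][cpu]; the per-cpu counter wraps at
 * cpu_work_history_num, so each CPU keeps only its most recent
 * cpu_work_history_num work executions.
 */
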
int check_work_valid_range(unsigned int cpu, unsigned int start_cnt,
		unsigned int end_cnt)
{
	int ret = 0, i;
	unsigned long long t1, t2;
	unsigned int end_sched_cnt = 0, end_sched_cnt_margin;
	unsigned int load_cnt, last_load_cnt, overflow = 0;
	unsigned int cnt, search_cnt;
	unsigned int upset = 0;

	if (cpu >= CPU_NUM)
		return WRONG_CPU_NUM;

	t1 = cpu_load_freq_history_view[start_cnt].time_stamp;
	t2 = cpu_load_freq_history_view[end_cnt].time_stamp;

	if ((t2 <= t1) || (t1 == 0) || (t2 == 0)) {
		pr_info("[time error] t1=%lld t2=%lld\n", t1, t2);
		return WRONG_TIME_STAMP;
	}

	last_load_cnt = cpu_load_freq_history_view_cnt;

	/* Find the oldest still-valid entry in the per-cpu work history. */
	cnt = cpu_load_freq_history_view[last_load_cnt].work_history_cnt[cpu];
	t1 = cpu_work_history_view[cnt][cpu].end_time;
	search_cnt = cnt;
	for (i = 0; i < cpu_work_history_num; i++) {
		search_cnt = get_index(search_cnt, cpu_work_history_num, 1);
		t2 = cpu_work_history_view[search_cnt][cpu].end_time;

		if (t2 < t1) {
			end_sched_cnt = search_cnt;
			break;
		}

		if (i >= cpu_work_history_num - 1)
			end_sched_cnt = cnt;
	}

	/* Walk the load history backwards and count buffer wrap-arounds. */
	load_cnt = last_load_cnt;
	for (i = 0; i < cpu_load_history_num; i++) {
		unsigned int sched_cnt, sched_before_cnt;
		unsigned int sched_before_cnt_margin;

		sched_cnt
			= cpu_load_freq_history_view[load_cnt]
				.work_history_cnt[cpu];
		load_cnt = get_index(load_cnt, cpu_load_history_num, -1);

		sched_before_cnt
			= cpu_load_freq_history_view[load_cnt]
				.work_history_cnt[cpu];

		if (sched_before_cnt > sched_cnt)
			upset++;

		end_sched_cnt_margin
			= get_index(end_sched_cnt, cpu_work_history_num, 1);
		sched_before_cnt_margin
			= get_index(sched_before_cnt, cpu_work_history_num, -1);

		/* "end_sched_cnt - 1" is needed
		 * because of calculating schedule time */
		if ((upset >= 2) || ((upset == 1)
			&& (sched_before_cnt_margin < end_sched_cnt_margin))) {
			overflow = 1;
			pr_err("[LA] overflow cpu=%d upset=%d"
				" sched_before_cnt_margin=%d"
				" end_sched_cnt_margin=%d end_sched_cnt=%d"
				" sched_before_cnt=%d sched_cnt=%d load_cnt=%d\n"
				, cpu, upset, sched_before_cnt_margin
				, end_sched_cnt_margin, end_sched_cnt
				, sched_before_cnt, sched_cnt, load_cnt);
			break;
		}

		if (load_cnt == start_cnt)
			break;
	}

	if (overflow != 0) {
		ret = OVERFLOW_ERROR;
		pr_info("[overflow error]\n");
	}

	return ret;
}

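/*
 * Overflow detection above: walking the load-history snapshots backwards
 * from the newest one, every drop in the per-cpu work index ("upset") means
 * the circular work buffer wrapped once.  Two wraps, or one wrap that
 * passes the oldest retained entry (end_sched_cnt), mean the requested
 * range has already been overwritten, so OVERFLOW_ERROR is returned.
 */
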
static ssize_t check_work_read(struct file *file,
	char __user *buffer, size_t count, loff_t *ppos)
{
	unsigned int i, ret = 0, size_for_copy = count;
	int ret_check_valid = 0;
	static char *buf;
	int buf_size = (PAGE_SIZE * 256);
	static unsigned int rest_size;

	unsigned int start_cnt = cpu_work_history_show_start_cnt;
	unsigned int end_cnt = cpu_work_history_show_end_cnt;
	unsigned long msec_rem;
	unsigned long long t;

	if (*ppos < 0 || !count)
		return 0;

	if (*ppos == 0) {
		buf = vmalloc(buf_size);
		if (buf == NULL)
			return -ENOMEM;

		if ((end_cnt) == (start_cnt + 1)) {
			ret += snprintf(buf + ret, buf_size - ret
				, "=======================================" \
				"========================================" \
				"========================================\n");
			ret += snprintf(buf + ret, buf_size - ret
				, " TIME CPU0_F CPU1_F CPU_LOCK"
				" [INDEX]\tCPU0 \tCPU1 \tONLINE \tNR_RUN\n");
			ret = show_cpu_load_freq_sub((int)end_cnt + 2
					, 5, buf, buf_size, ret);
			ret += snprintf(buf + ret, buf_size - ret, "\n\n");
		}

		for (i = 0; i < CPU_NUM; i++) {
			if (cpu_work_history_show_select_cpu != -1)
				if (i != cpu_work_history_show_select_cpu)
					continue;

			ret_check_valid = check_work_valid_range(i, start_cnt, end_cnt);
			if (ret_check_valid < 0) {
				ret += snprintf(buf + ret, buf_size - ret
					, "[ERROR] cpu[%d] Invalid range !!! err=%d\n"
					, i, ret_check_valid);
				pr_info("[ERROR] cpu[%d] Invalid range !!! err=%d\n"
					, i, ret_check_valid);
				continue;
			}

			clc_work_run_time(i, start_cnt, end_cnt);

			t = total_time;
			msec_rem = do_div(t, 1000000);

			if (end_cnt == start_cnt + 1) {
				ret += snprintf(buf + ret, buf_size - ret,
					"[%d] TOTAL SECTION TIME = %ld.%ld[ms]\n\n"
					, end_cnt, (unsigned long)t, msec_rem);
			} else {
				ret += snprintf(buf + ret, buf_size - ret,
					"[%d~%d] TOTAL SECTION TIME = %ld.%ld[ms]\n\n"
					, start_cnt + 1, end_cnt, (unsigned long)t
					, msec_rem);
			}

			ret += snprintf(buf + ret, buf_size - ret,
				"######################################"
				" CPU %d ###############################\n", i);

			if (cpu_work_history_show_select_cpu == -1)
				ret = view_workfn_list(buf, buf_size, ret);
			else if (i == cpu_work_history_show_select_cpu)
				ret = view_workfn_list(buf, buf_size, ret);

			if (ret < buf_size - 1)
				ret += snprintf(buf + ret, buf_size - ret, "\n\n");
		}

		if (ret <= count) {
			size_for_copy = ret;
			rest_size = 0;
		} else {
			size_for_copy = count;
			rest_size = ret - size_for_copy;
		}
	} else {
		if (rest_size <= count) {
			size_for_copy = rest_size;
			rest_size = 0;
		} else {
			size_for_copy = count;
			rest_size -= size_for_copy;
		}
	}

	if (size_for_copy > 0) {
		int offset = (int) *ppos;
		if (copy_to_user(buffer, buf + offset, size_for_copy)) {
			vfree(buf);
			return -EFAULT;
		}
		*ppos += size_for_copy;
	} else
		vfree(buf);

	return size_for_copy;
}

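/*
 * check_work_read() renders the whole report into a vmalloc() buffer on the
 * first read (*ppos == 0) and then hands it back at most "count" bytes per
 * call; rest_size carries the remainder across successive reads, so the
 * debugfs file is meant to be read sequentially (e.g. with cat), not seeked.
 */
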
static ssize_t check_work_write(struct file *file,
		const char __user *user_buf, size_t count,
		loff_t *ppos)
{
	set_cpu_load_freq_history_array_range(user_buf);

	cpu_work_history_show_select_cpu = cpu_task_history_show_select_cpu;
	cpu_work_history_show_start_cnt = cpu_task_history_show_start_cnt;
	cpu_work_history_show_end_cnt = cpu_task_history_show_end_cnt;

	return count;
}

static int check_work_detail_sub(char *buf, int buf_size)
{
	int ret = 0, i = 0, ret_check_valid = 0;
	unsigned int start_cnt = cpu_work_history_show_start_cnt;
	unsigned int end_cnt = cpu_work_history_show_end_cnt;

	for (i = 0; i < CPU_NUM; i++) {
		if (cpu_work_history_show_select_cpu != -1)
			if (i != cpu_work_history_show_select_cpu)
				continue;

		ret_check_valid = check_work_valid_range(i, start_cnt, end_cnt);
		if (ret_check_valid < 0) {
			ret += snprintf(buf + ret, buf_size - ret
				, "[ERROR] cpu[%d] Invalid range !!! err=%d\n"
				, i, ret_check_valid);
			pr_info("[ERROR] cpu[%d] Invalid range !!! err=%d\n"
				, i, ret_check_valid);
			continue;
		}

		ret += snprintf(buf + ret, buf_size - ret,
			"###########################################"
			"################ CPU %d ######################"
			"##########################################\n", i);

		ret = work_time_list_view(i, start_cnt, end_cnt, buf, buf_size, ret);

		ret += snprintf(buf + ret, buf_size - ret, "\n\n");
	}

	return ret;
}

static ssize_t check_work_detail(struct file *file,
	char __user *buffer, size_t count, loff_t *ppos)
{
	unsigned int size_for_copy;

	size_for_copy = wrapper_for_debug_fs(buffer, count, ppos,
					check_work_detail_sub);

	return size_for_copy;
}

static const struct file_operations check_work_fops = {
	.owner = THIS_MODULE,
	.read = check_work_read,
	.write = check_work_write,
};

static const struct file_operations check_work_detail_fops = {
	.owner = THIS_MODULE,
	.read = check_work_detail,
};

void debugfs_workqueue(struct dentry *d)
{
	if (!debugfs_create_file("check_work", 0600,
			d, NULL, &check_work_fops))
		pr_err("%s : debugfs_create_file, error\n", "check_work");

	if (!debugfs_create_file("check_work_detail", 0600,
			d, NULL, &check_work_detail_fops))
		pr_err("%s : debugfs_create_file, error\n", "check_work_detail");
}

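/*
 * Typical userspace usage, assuming the caller passes a debugfs directory
 * named "load_analyzer" and that set_cpu_load_freq_history_array_range()
 * parses an index range written as text (both are assumptions, not defined
 * in this file):
 *
 *	# echo "100 200" > /sys/kernel/debug/load_analyzer/check_work
 *	# cat /sys/kernel/debug/load_analyzer/check_work
 *	# cat /sys/kernel/debug/load_analyzer/check_work_detail
 */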