perf_counter: Add fork event
[platform/kernel/linux-starfive.git] / kernel / perf_counter.c
1 /*
2  * Performance counter core code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  *  For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34  * Each CPU has a list of per CPU counters:
35  */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_munmap_counters __read_mostly;
45 static atomic_t nr_comm_counters __read_mostly;
46
47 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
48 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
49 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
50
51 static atomic64_t perf_counter_id;
52
53 /*
54  * Lock for (sysadmin-configurable) counter reservations:
55  */
56 static DEFINE_SPINLOCK(perf_resource_lock);
57
58 /*
59  * Architecture provided APIs - weak aliases:
60  */
61 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
62 {
63         return NULL;
64 }
65
66 void __weak hw_perf_disable(void)               { barrier(); }
67 void __weak hw_perf_enable(void)                { barrier(); }
68
69 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
70
71 int __weak
72 hw_perf_group_sched_in(struct perf_counter *group_leader,
73                struct perf_cpu_context *cpuctx,
74                struct perf_counter_context *ctx, int cpu)
75 {
76         return 0;
77 }
78
79 void __weak perf_counter_print_debug(void)      { }
80
81 static DEFINE_PER_CPU(int, disable_count);
82
83 void __perf_disable(void)
84 {
85         __get_cpu_var(disable_count)++;
86 }
87
88 bool __perf_enable(void)
89 {
90         return !--__get_cpu_var(disable_count);
91 }
92
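/*
 * perf_disable()/perf_enable() nest via the per-cpu disable_count:
 * hw_perf_disable() is called on every disable, but hw_perf_enable()
 * only runs once the last matching perf_enable() drops the count
 * back to zero.
 */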
93 void perf_disable(void)
94 {
95         __perf_disable();
96         hw_perf_disable();
97 }
98
99 void perf_enable(void)
100 {
101         if (__perf_enable())
102                 hw_perf_enable();
103 }
104
105 static void get_ctx(struct perf_counter_context *ctx)
106 {
107         atomic_inc(&ctx->refcount);
108 }
109
110 static void free_ctx(struct rcu_head *head)
111 {
112         struct perf_counter_context *ctx;
113
114         ctx = container_of(head, struct perf_counter_context, rcu_head);
115         kfree(ctx);
116 }
117
118 static void put_ctx(struct perf_counter_context *ctx)
119 {
120         if (atomic_dec_and_test(&ctx->refcount)) {
121                 if (ctx->parent_ctx)
122                         put_ctx(ctx->parent_ctx);
123                 if (ctx->task)
124                         put_task_struct(ctx->task);
125                 call_rcu(&ctx->rcu_head, free_ctx);
126         }
127 }
128
129 /*
130  * Get the perf_counter_context for a task and lock it.
131  * This has to cope with the fact that until it is locked,
132  * the context could get moved to another task.
133  */
134 static struct perf_counter_context *
135 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
136 {
137         struct perf_counter_context *ctx;
138
139         rcu_read_lock();
140  retry:
141         ctx = rcu_dereference(task->perf_counter_ctxp);
142         if (ctx) {
143                 /*
144                  * If this context is a clone of another, it might
145                  * get swapped for another underneath us by
146                  * perf_counter_task_sched_out, though the
147                  * rcu_read_lock() protects us from any context
148                  * getting freed.  Lock the context and check if it
149                  * got swapped before we could get the lock, and retry
150                  * if so.  If we locked the right context, then it
151                  * can't get swapped on us any more.
152                  */
153                 spin_lock_irqsave(&ctx->lock, *flags);
154                 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
155                         spin_unlock_irqrestore(&ctx->lock, *flags);
156                         goto retry;
157                 }
158         }
159         rcu_read_unlock();
160         return ctx;
161 }
162
163 /*
164  * Get the context for a task and increment its pin_count so it
165  * can't get swapped to another task.  This also increments its
166  * reference count so that the context can't get freed.
167  */
168 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
169 {
170         struct perf_counter_context *ctx;
171         unsigned long flags;
172
173         ctx = perf_lock_task_context(task, &flags);
174         if (ctx) {
175                 ++ctx->pin_count;
176                 get_ctx(ctx);
177                 spin_unlock_irqrestore(&ctx->lock, flags);
178         }
179         return ctx;
180 }
181
182 static void perf_unpin_context(struct perf_counter_context *ctx)
183 {
184         unsigned long flags;
185
186         spin_lock_irqsave(&ctx->lock, flags);
187         --ctx->pin_count;
188         spin_unlock_irqrestore(&ctx->lock, flags);
189         put_ctx(ctx);
190 }
191
192 /*
193  * Add a counter from the lists for its context.
194  * Must be called with ctx->mutex and ctx->lock held.
195  */
196 static void
197 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
198 {
199         struct perf_counter *group_leader = counter->group_leader;
200
201         /*
202          * Depending on whether it is a standalone or sibling counter,
203          * add it straight to the context's counter list, or to the group
204          * leader's sibling list:
205          */
206         if (group_leader == counter)
207                 list_add_tail(&counter->list_entry, &ctx->counter_list);
208         else {
209                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
210                 group_leader->nr_siblings++;
211         }
212
213         list_add_rcu(&counter->event_entry, &ctx->event_list);
214         ctx->nr_counters++;
215 }
216
217 /*
218  * Remove a counter from the lists for its context.
219  * Must be called with ctx->mutex and ctx->lock held.
220  */
221 static void
222 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
223 {
224         struct perf_counter *sibling, *tmp;
225
226         if (list_empty(&counter->list_entry))
227                 return;
228         ctx->nr_counters--;
229
230         list_del_init(&counter->list_entry);
231         list_del_rcu(&counter->event_entry);
232
233         if (counter->group_leader != counter)
234                 counter->group_leader->nr_siblings--;
235
236         /*
237          * If this was a group counter with sibling counters then
238          * upgrade the siblings to singleton counters by adding them
239          * to the context list directly:
240          */
241         list_for_each_entry_safe(sibling, tmp,
242                                  &counter->sibling_list, list_entry) {
243
244                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
245                 sibling->group_leader = sibling;
246         }
247 }
248
249 static void
250 counter_sched_out(struct perf_counter *counter,
251                   struct perf_cpu_context *cpuctx,
252                   struct perf_counter_context *ctx)
253 {
254         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
255                 return;
256
257         counter->state = PERF_COUNTER_STATE_INACTIVE;
258         counter->tstamp_stopped = ctx->time;
259         counter->pmu->disable(counter);
260         counter->oncpu = -1;
261
262         if (!is_software_counter(counter))
263                 cpuctx->active_oncpu--;
264         ctx->nr_active--;
265         if (counter->attr.exclusive || !cpuctx->active_oncpu)
266                 cpuctx->exclusive = 0;
267 }
268
269 static void
270 group_sched_out(struct perf_counter *group_counter,
271                 struct perf_cpu_context *cpuctx,
272                 struct perf_counter_context *ctx)
273 {
274         struct perf_counter *counter;
275
276         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
277                 return;
278
279         counter_sched_out(group_counter, cpuctx, ctx);
280
281         /*
282          * Schedule out siblings (if any):
283          */
284         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
285                 counter_sched_out(counter, cpuctx, ctx);
286
287         if (group_counter->attr.exclusive)
288                 cpuctx->exclusive = 0;
289 }
290
291 /*
292  * Cross CPU call to remove a performance counter
293  *
294  * We disable the counter on the hardware level first. After that we
295  * remove it from the context list.
296  */
297 static void __perf_counter_remove_from_context(void *info)
298 {
299         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
300         struct perf_counter *counter = info;
301         struct perf_counter_context *ctx = counter->ctx;
302
303         /*
304          * If this is a task context, we need to check whether it is
305          * the current task context of this cpu. If not, it has been
306          * scheduled out before the smp call arrived.
307          */
308         if (ctx->task && cpuctx->task_ctx != ctx)
309                 return;
310
311         spin_lock(&ctx->lock);
312         /*
313          * Protect the list operation against NMI by disabling the
314          * counters on a global level.
315          */
316         perf_disable();
317
318         counter_sched_out(counter, cpuctx, ctx);
319
320         list_del_counter(counter, ctx);
321
322         if (!ctx->task) {
323                 /*
324                  * Allow more per task counters with respect to the
325                  * reservation:
326                  */
327                 cpuctx->max_pertask =
328                         min(perf_max_counters - ctx->nr_counters,
329                             perf_max_counters - perf_reserved_percpu);
330         }
331
332         perf_enable();
333         spin_unlock(&ctx->lock);
334 }
335
336
337 /*
338  * Remove the counter from a task's (or a CPU's) list of counters.
339  *
340  * Must be called with ctx->mutex held.
341  *
342  * CPU counters are removed with an smp call. For task counters we only
343  * call when the task is on a CPU.
344  *
345  * If counter->ctx is a cloned context, callers must make sure that
346  * every task struct that counter->ctx->task could possibly point to
347  * remains valid.  This is OK when called from perf_release since
348  * that only calls us on the top-level context, which can't be a clone.
349  * When called from perf_counter_exit_task, it's OK because the
350  * context has been detached from its task.
351  */
352 static void perf_counter_remove_from_context(struct perf_counter *counter)
353 {
354         struct perf_counter_context *ctx = counter->ctx;
355         struct task_struct *task = ctx->task;
356
357         if (!task) {
358                 /*
359                  * Per cpu counters are removed via an smp call and
360                  * the removal is always successful.
361                  */
362                 smp_call_function_single(counter->cpu,
363                                          __perf_counter_remove_from_context,
364                                          counter, 1);
365                 return;
366         }
367
368 retry:
369         task_oncpu_function_call(task, __perf_counter_remove_from_context,
370                                  counter);
371
372         spin_lock_irq(&ctx->lock);
373         /*
374          * If the context is active we need to retry the smp call.
375          */
376         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
377                 spin_unlock_irq(&ctx->lock);
378                 goto retry;
379         }
380
381         /*
382          * The lock prevents this context from being scheduled in, so we
383          * can remove the counter safely if the call above did not
384          * succeed.
385          */
386         if (!list_empty(&counter->list_entry)) {
387                 list_del_counter(counter, ctx);
388         }
389         spin_unlock_irq(&ctx->lock);
390 }
391
392 static inline u64 perf_clock(void)
393 {
394         return cpu_clock(smp_processor_id());
395 }
396
397 /*
398  * Update the record of the current time in a context.
399  */
400 static void update_context_time(struct perf_counter_context *ctx)
401 {
402         u64 now = perf_clock();
403
404         ctx->time += now - ctx->timestamp;
405         ctx->timestamp = now;
406 }
407
408 /*
409  * Update the total_time_enabled and total_time_running fields for a counter.
410  */
411 static void update_counter_times(struct perf_counter *counter)
412 {
413         struct perf_counter_context *ctx = counter->ctx;
414         u64 run_end;
415
416         if (counter->state < PERF_COUNTER_STATE_INACTIVE)
417                 return;
418
419         counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
420
421         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
422                 run_end = counter->tstamp_stopped;
423         else
424                 run_end = ctx->time;
425
426         counter->total_time_running = run_end - counter->tstamp_running;
427 }
428
429 /*
430  * Update total_time_enabled and total_time_running for all counters in a group.
431  */
432 static void update_group_times(struct perf_counter *leader)
433 {
434         struct perf_counter *counter;
435
436         update_counter_times(leader);
437         list_for_each_entry(counter, &leader->sibling_list, list_entry)
438                 update_counter_times(counter);
439 }
440
441 /*
442  * Cross CPU call to disable a performance counter
443  */
444 static void __perf_counter_disable(void *info)
445 {
446         struct perf_counter *counter = info;
447         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
448         struct perf_counter_context *ctx = counter->ctx;
449
450         /*
451          * If this is a per-task counter, we need to check whether this
452          * counter's task is the current task on this cpu.
453          */
454         if (ctx->task && cpuctx->task_ctx != ctx)
455                 return;
456
457         spin_lock(&ctx->lock);
458
459         /*
460          * If the counter is on, turn it off.
461          * If it is in error state, leave it in error state.
462          */
463         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
464                 update_context_time(ctx);
465                 update_counter_times(counter);
466                 if (counter == counter->group_leader)
467                         group_sched_out(counter, cpuctx, ctx);
468                 else
469                         counter_sched_out(counter, cpuctx, ctx);
470                 counter->state = PERF_COUNTER_STATE_OFF;
471         }
472
473         spin_unlock(&ctx->lock);
474 }
475
476 /*
477  * Disable a counter.
478  *
479  * If counter->ctx is a cloned context, callers must make sure that
480  * every task struct that counter->ctx->task could possibly point to
481  * remains valid.  This condition is satisfied when called through
482  * perf_counter_for_each_child or perf_counter_for_each because they
483  * hold the top-level counter's child_mutex, so any descendant that
484  * goes to exit will block in sync_child_counter.
485  * When called from perf_pending_counter it's OK because counter->ctx
486  * is the current context on this CPU and preemption is disabled,
487  * hence we can't get into perf_counter_task_sched_out for this context.
488  */
489 static void perf_counter_disable(struct perf_counter *counter)
490 {
491         struct perf_counter_context *ctx = counter->ctx;
492         struct task_struct *task = ctx->task;
493
494         if (!task) {
495                 /*
496                  * Disable the counter on the cpu that it's on
497                  */
498                 smp_call_function_single(counter->cpu, __perf_counter_disable,
499                                          counter, 1);
500                 return;
501         }
502
503  retry:
504         task_oncpu_function_call(task, __perf_counter_disable, counter);
505
506         spin_lock_irq(&ctx->lock);
507         /*
508          * If the counter is still active, we need to retry the cross-call.
509          */
510         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
511                 spin_unlock_irq(&ctx->lock);
512                 goto retry;
513         }
514
515         /*
516          * Since we have the lock this context can't be scheduled
517          * in, so we can change the state safely.
518          */
519         if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
520                 update_counter_times(counter);
521                 counter->state = PERF_COUNTER_STATE_OFF;
522         }
523
524         spin_unlock_irq(&ctx->lock);
525 }
526
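/*
 * Put a single counter onto the PMU for this cpu: mark it ACTIVE,
 * enable it in hardware and update the time and active-counter
 * accounting.  Returns -EAGAIN (leaving the counter INACTIVE) if
 * the pmu refuses it.
 */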
527 static int
528 counter_sched_in(struct perf_counter *counter,
529                  struct perf_cpu_context *cpuctx,
530                  struct perf_counter_context *ctx,
531                  int cpu)
532 {
533         if (counter->state <= PERF_COUNTER_STATE_OFF)
534                 return 0;
535
536         counter->state = PERF_COUNTER_STATE_ACTIVE;
537         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
538         /*
539          * The new state must be visible before we turn it on in the hardware:
540          */
541         smp_wmb();
542
543         if (counter->pmu->enable(counter)) {
544                 counter->state = PERF_COUNTER_STATE_INACTIVE;
545                 counter->oncpu = -1;
546                 return -EAGAIN;
547         }
548
549         counter->tstamp_running += ctx->time - counter->tstamp_stopped;
550
551         if (!is_software_counter(counter))
552                 cpuctx->active_oncpu++;
553         ctx->nr_active++;
554
555         if (counter->attr.exclusive)
556                 cpuctx->exclusive = 1;
557
558         return 0;
559 }
560
561 static int
562 group_sched_in(struct perf_counter *group_counter,
563                struct perf_cpu_context *cpuctx,
564                struct perf_counter_context *ctx,
565                int cpu)
566 {
567         struct perf_counter *counter, *partial_group;
568         int ret;
569
570         if (group_counter->state == PERF_COUNTER_STATE_OFF)
571                 return 0;
572
573         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
574         if (ret)
575                 return ret < 0 ? ret : 0;
576
577         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
578                 return -EAGAIN;
579
580         /*
581          * Schedule in siblings as one group (if any):
582          */
583         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
584                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
585                         partial_group = counter;
586                         goto group_error;
587                 }
588         }
589
590         return 0;
591
592 group_error:
593         /*
594          * Groups can be scheduled in as one unit only, so undo any
595          * partial group before returning:
596          */
597         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
598                 if (counter == partial_group)
599                         break;
600                 counter_sched_out(counter, cpuctx, ctx);
601         }
602         counter_sched_out(group_counter, cpuctx, ctx);
603
604         return -EAGAIN;
605 }
606
607 /*
608  * Return 1 for a group consisting entirely of software counters,
609  * 0 if the group contains any hardware counters.
610  */
611 static int is_software_only_group(struct perf_counter *leader)
612 {
613         struct perf_counter *counter;
614
615         if (!is_software_counter(leader))
616                 return 0;
617
618         list_for_each_entry(counter, &leader->sibling_list, list_entry)
619                 if (!is_software_counter(counter))
620                         return 0;
621
622         return 1;
623 }
624
625 /*
626  * Work out whether we can put this counter group on the CPU now.
627  */
628 static int group_can_go_on(struct perf_counter *counter,
629                            struct perf_cpu_context *cpuctx,
630                            int can_add_hw)
631 {
632         /*
633          * Groups consisting entirely of software counters can always go on.
634          */
635         if (is_software_only_group(counter))
636                 return 1;
637         /*
638          * If an exclusive group is already on, no other hardware
639          * counters can go on.
640          */
641         if (cpuctx->exclusive)
642                 return 0;
643         /*
644          * If this group is exclusive and there are already
645          * counters on the CPU, it can't go on.
646          */
647         if (counter->attr.exclusive && cpuctx->active_oncpu)
648                 return 0;
649         /*
650          * Otherwise, try to add it if all previous groups were able
651          * to go on.
652          */
653         return can_add_hw;
654 }
655
656 static void add_counter_to_ctx(struct perf_counter *counter,
657                                struct perf_counter_context *ctx)
658 {
659         list_add_counter(counter, ctx);
660         counter->tstamp_enabled = ctx->time;
661         counter->tstamp_running = ctx->time;
662         counter->tstamp_stopped = ctx->time;
663 }
664
665 /*
666  * Cross CPU call to install and enable a performance counter
667  *
668  * Must be called with ctx->mutex held
669  */
670 static void __perf_install_in_context(void *info)
671 {
672         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
673         struct perf_counter *counter = info;
674         struct perf_counter_context *ctx = counter->ctx;
675         struct perf_counter *leader = counter->group_leader;
676         int cpu = smp_processor_id();
677         int err;
678
679         /*
680          * If this is a task context, we need to check whether it is
681          * the current task context of this cpu. If not, it has been
682          * scheduled out before the smp call arrived.
683          * Or possibly this is the right context but it isn't
684          * on this cpu because it had no counters.
685          */
686         if (ctx->task && cpuctx->task_ctx != ctx) {
687                 if (cpuctx->task_ctx || ctx->task != current)
688                         return;
689                 cpuctx->task_ctx = ctx;
690         }
691
692         spin_lock(&ctx->lock);
693         ctx->is_active = 1;
694         update_context_time(ctx);
695
696         /*
697          * Protect the list operation against NMI by disabling the
698          * counters on a global level. NOP for non NMI based counters.
699          */
700         perf_disable();
701
702         add_counter_to_ctx(counter, ctx);
703
704         /*
705          * Don't put the counter on if it is disabled or if
706          * it is in a group and the group isn't on.
707          */
708         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
709             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
710                 goto unlock;
711
712         /*
713          * An exclusive counter can't go on if there are already active
714          * hardware counters, and no hardware counter can go on if there
715          * is already an exclusive counter on.
716          */
717         if (!group_can_go_on(counter, cpuctx, 1))
718                 err = -EEXIST;
719         else
720                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
721
722         if (err) {
723                 /*
724                  * This counter couldn't go on.  If it is in a group
725                  * then we have to pull the whole group off.
726                  * If the counter group is pinned then put it in error state.
727                  */
728                 if (leader != counter)
729                         group_sched_out(leader, cpuctx, ctx);
730                 if (leader->attr.pinned) {
731                         update_group_times(leader);
732                         leader->state = PERF_COUNTER_STATE_ERROR;
733                 }
734         }
735
736         if (!err && !ctx->task && cpuctx->max_pertask)
737                 cpuctx->max_pertask--;
738
739  unlock:
740         perf_enable();
741
742         spin_unlock(&ctx->lock);
743 }
744
745 /*
746  * Attach a performance counter to a context
747  *
748  * First we add the counter to the list with the hardware enable bit
749  * in counter->hw_config cleared.
750  *
751  * If the counter is attached to a task which is on a CPU we use an smp
752  * call to enable it in the task context. The task might have been
753  * scheduled away, but we check this in the smp call again.
754  *
755  * Must be called with ctx->mutex held.
756  */
757 static void
758 perf_install_in_context(struct perf_counter_context *ctx,
759                         struct perf_counter *counter,
760                         int cpu)
761 {
762         struct task_struct *task = ctx->task;
763
764         if (!task) {
765                 /*
766                  * Per cpu counters are installed via an smp call and
767                  * the install is always successful.
768                  */
769                 smp_call_function_single(cpu, __perf_install_in_context,
770                                          counter, 1);
771                 return;
772         }
773
774 retry:
775         task_oncpu_function_call(task, __perf_install_in_context,
776                                  counter);
777
778         spin_lock_irq(&ctx->lock);
779         /*
780          * If the context is active we need to retry the smp call.
781          */
782         if (ctx->is_active && list_empty(&counter->list_entry)) {
783                 spin_unlock_irq(&ctx->lock);
784                 goto retry;
785         }
786
787         /*
788          * The lock prevents this context from being scheduled in, so we
789          * can add the counter safely if the call above did not
790          * succeed.
791          */
792         if (list_empty(&counter->list_entry))
793                 add_counter_to_ctx(counter, ctx);
794         spin_unlock_irq(&ctx->lock);
795 }
796
797 /*
798  * Cross CPU call to enable a performance counter
799  */
800 static void __perf_counter_enable(void *info)
801 {
802         struct perf_counter *counter = info;
803         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
804         struct perf_counter_context *ctx = counter->ctx;
805         struct perf_counter *leader = counter->group_leader;
806         int err;
807
808         /*
809          * If this is a per-task counter, we need to check whether this
810          * counter's task is the current task on this cpu.
811          */
812         if (ctx->task && cpuctx->task_ctx != ctx) {
813                 if (cpuctx->task_ctx || ctx->task != current)
814                         return;
815                 cpuctx->task_ctx = ctx;
816         }
817
818         spin_lock(&ctx->lock);
819         ctx->is_active = 1;
820         update_context_time(ctx);
821
822         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
823                 goto unlock;
824         counter->state = PERF_COUNTER_STATE_INACTIVE;
825         counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
826
827         /*
828          * If the counter is in a group and isn't the group leader,
829          * then don't put it on unless the group is on.
830          */
831         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
832                 goto unlock;
833
834         if (!group_can_go_on(counter, cpuctx, 1)) {
835                 err = -EEXIST;
836         } else {
837                 perf_disable();
838                 if (counter == leader)
839                         err = group_sched_in(counter, cpuctx, ctx,
840                                              smp_processor_id());
841                 else
842                         err = counter_sched_in(counter, cpuctx, ctx,
843                                                smp_processor_id());
844                 perf_enable();
845         }
846
847         if (err) {
848                 /*
849                  * If this counter can't go on and it's part of a
850                  * group, then the whole group has to come off.
851                  */
852                 if (leader != counter)
853                         group_sched_out(leader, cpuctx, ctx);
854                 if (leader->attr.pinned) {
855                         update_group_times(leader);
856                         leader->state = PERF_COUNTER_STATE_ERROR;
857                 }
858         }
859
860  unlock:
861         spin_unlock(&ctx->lock);
862 }
863
864 /*
865  * Enable a counter.
866  *
867  * If counter->ctx is a cloned context, callers must make sure that
868  * every task struct that counter->ctx->task could possibly point to
869  * remains valid.  This condition is satisfied when called through
870  * perf_counter_for_each_child or perf_counter_for_each as described
871  * for perf_counter_disable.
872  */
873 static void perf_counter_enable(struct perf_counter *counter)
874 {
875         struct perf_counter_context *ctx = counter->ctx;
876         struct task_struct *task = ctx->task;
877
878         if (!task) {
879                 /*
880                  * Enable the counter on the cpu that it's on
881                  */
882                 smp_call_function_single(counter->cpu, __perf_counter_enable,
883                                          counter, 1);
884                 return;
885         }
886
887         spin_lock_irq(&ctx->lock);
888         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
889                 goto out;
890
891         /*
892          * If the counter is in error state, clear that first.
893          * That way, if we see the counter in error state below, we
894          * know that it has gone back into error state, as distinct
895          * from the task having been scheduled away before the
896          * cross-call arrived.
897          */
898         if (counter->state == PERF_COUNTER_STATE_ERROR)
899                 counter->state = PERF_COUNTER_STATE_OFF;
900
901  retry:
902         spin_unlock_irq(&ctx->lock);
903         task_oncpu_function_call(task, __perf_counter_enable, counter);
904
905         spin_lock_irq(&ctx->lock);
906
907         /*
908          * If the context is active and the counter is still off,
909          * we need to retry the cross-call.
910          */
911         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
912                 goto retry;
913
914         /*
915          * Since we have the lock this context can't be scheduled
916          * in, so we can change the state safely.
917          */
918         if (counter->state == PERF_COUNTER_STATE_OFF) {
919                 counter->state = PERF_COUNTER_STATE_INACTIVE;
920                 counter->tstamp_enabled =
921                         ctx->time - counter->total_time_enabled;
922         }
923  out:
924         spin_unlock_irq(&ctx->lock);
925 }
926
927 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
928 {
929         /*
930          * not supported on inherited counters
931          */
932         if (counter->attr.inherit)
933                 return -EINVAL;
934
935         atomic_add(refresh, &counter->event_limit);
936         perf_counter_enable(counter);
937
938         return 0;
939 }
940
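/*
 * Schedule out every counter in the context: mark the context
 * inactive, update its time and take all active groups and
 * counters off the PMU.
 */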
941 void __perf_counter_sched_out(struct perf_counter_context *ctx,
942                               struct perf_cpu_context *cpuctx)
943 {
944         struct perf_counter *counter;
945
946         spin_lock(&ctx->lock);
947         ctx->is_active = 0;
948         if (likely(!ctx->nr_counters))
949                 goto out;
950         update_context_time(ctx);
951
952         perf_disable();
953         if (ctx->nr_active) {
954                 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
955                         if (counter != counter->group_leader)
956                                 counter_sched_out(counter, cpuctx, ctx);
957                         else
958                                 group_sched_out(counter, cpuctx, ctx);
959                 }
960         }
961         perf_enable();
962  out:
963         spin_unlock(&ctx->lock);
964 }
965
966 /*
967  * Test whether two contexts are equivalent, i.e. whether they
968  * have both been cloned from the same version of the same context
969  * and they both have the same number of enabled counters.
970  * If the number of enabled counters is the same, then the set
971  * of enabled counters should be the same, because these are both
972  * inherited contexts, therefore we can't access individual counters
973  * in them directly with an fd; we can only enable/disable all
974  * counters via prctl, or enable/disable all counters in a family
975  * via ioctl, which will have the same effect on both contexts.
976  */
977 static int context_equiv(struct perf_counter_context *ctx1,
978                          struct perf_counter_context *ctx2)
979 {
980         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
981                 && ctx1->parent_gen == ctx2->parent_gen
982                 && !ctx1->pin_count && !ctx2->pin_count;
983 }
984
985 /*
986  * Called from scheduler to remove the counters of the current task,
987  * with interrupts disabled.
988  *
989  * We stop each counter and update the counter value in counter->count.
990  *
991  * This does not protect us against NMI, but disable()
992  * sets the disabled bit in the control field of counter _before_
993  * accessing the counter control register. If a NMI hits, then it will
994  * not restart the counter.
995  */
996 void perf_counter_task_sched_out(struct task_struct *task,
997                                  struct task_struct *next, int cpu)
998 {
999         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1000         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1001         struct perf_counter_context *next_ctx;
1002         struct perf_counter_context *parent;
1003         struct pt_regs *regs;
1004         int do_switch = 1;
1005
1006         regs = task_pt_regs(task);
1007         perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
1008
1009         if (likely(!ctx || !cpuctx->task_ctx))
1010                 return;
1011
1012         update_context_time(ctx);
1013
1014         rcu_read_lock();
1015         parent = rcu_dereference(ctx->parent_ctx);
1016         next_ctx = next->perf_counter_ctxp;
1017         if (parent && next_ctx &&
1018             rcu_dereference(next_ctx->parent_ctx) == parent) {
1019                 /*
1020                  * Looks like the two contexts are clones, so we might be
1021                  * able to optimize the context switch.  We lock both
1022                  * contexts and check that they are clones under the
1023                  * lock (including re-checking that neither has been
1024                  * uncloned in the meantime).  It doesn't matter which
1025                  * order we take the locks because no other cpu could
1026                  * be trying to lock both of these tasks.
1027                  */
1028                 spin_lock(&ctx->lock);
1029                 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1030                 if (context_equiv(ctx, next_ctx)) {
1031                         /*
1032                          * XXX do we need a memory barrier of sorts
1033                          * wrt to rcu_dereference() of perf_counter_ctxp
1034                          */
1035                         task->perf_counter_ctxp = next_ctx;
1036                         next->perf_counter_ctxp = ctx;
1037                         ctx->task = next;
1038                         next_ctx->task = task;
1039                         do_switch = 0;
1040                 }
1041                 spin_unlock(&next_ctx->lock);
1042                 spin_unlock(&ctx->lock);
1043         }
1044         rcu_read_unlock();
1045
1046         if (do_switch) {
1047                 __perf_counter_sched_out(ctx, cpuctx);
1048                 cpuctx->task_ctx = NULL;
1049         }
1050 }
1051
1052 /*
1053  * Called with IRQs disabled
1054  */
1055 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1056 {
1057         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1058
1059         if (!cpuctx->task_ctx)
1060                 return;
1061
1062         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1063                 return;
1064
1065         __perf_counter_sched_out(ctx, cpuctx);
1066         cpuctx->task_ctx = NULL;
1067 }
1068
1069 /*
1070  * Called with IRQs disabled
1071  */
1072 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1073 {
1074         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1075 }
1076
1077 static void
1078 __perf_counter_sched_in(struct perf_counter_context *ctx,
1079                         struct perf_cpu_context *cpuctx, int cpu)
1080 {
1081         struct perf_counter *counter;
1082         int can_add_hw = 1;
1083
1084         spin_lock(&ctx->lock);
1085         ctx->is_active = 1;
1086         if (likely(!ctx->nr_counters))
1087                 goto out;
1088
1089         ctx->timestamp = perf_clock();
1090
1091         perf_disable();
1092
1093         /*
1094          * First go through the list and put on any pinned groups
1095          * in order to give them the best chance of going on.
1096          */
1097         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1098                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1099                     !counter->attr.pinned)
1100                         continue;
1101                 if (counter->cpu != -1 && counter->cpu != cpu)
1102                         continue;
1103
1104                 if (counter != counter->group_leader)
1105                         counter_sched_in(counter, cpuctx, ctx, cpu);
1106                 else {
1107                         if (group_can_go_on(counter, cpuctx, 1))
1108                                 group_sched_in(counter, cpuctx, ctx, cpu);
1109                 }
1110
1111                 /*
1112                  * If this pinned group hasn't been scheduled,
1113                  * put it in error state.
1114                  */
1115                 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1116                         update_group_times(counter);
1117                         counter->state = PERF_COUNTER_STATE_ERROR;
1118                 }
1119         }
1120
1121         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1122                 /*
1123                  * Ignore counters in OFF or ERROR state, and
1124                  * ignore pinned counters since we did them already.
1125                  */
1126                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1127                     counter->attr.pinned)
1128                         continue;
1129
1130                 /*
1131                  * Listen to the 'cpu' scheduling filter constraint
1132                  * of counters:
1133                  */
1134                 if (counter->cpu != -1 && counter->cpu != cpu)
1135                         continue;
1136
1137                 if (counter != counter->group_leader) {
1138                         if (counter_sched_in(counter, cpuctx, ctx, cpu))
1139                                 can_add_hw = 0;
1140                 } else {
1141                         if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1142                                 if (group_sched_in(counter, cpuctx, ctx, cpu))
1143                                         can_add_hw = 0;
1144                         }
1145                 }
1146         }
1147         perf_enable();
1148  out:
1149         spin_unlock(&ctx->lock);
1150 }
1151
1152 /*
1153  * Called from scheduler to add the counters of the current task
1154  * with interrupts disabled.
1155  *
1156  * We restore the counter value and then enable it.
1157  *
1158  * This does not protect us against NMI, but enable()
1159  * sets the enabled bit in the control field of counter _before_
1160  * accessing the counter control register. If a NMI hits, then it will
1161  * keep the counter running.
1162  */
1163 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1164 {
1165         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1166         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1167
1168         if (likely(!ctx))
1169                 return;
1170         if (cpuctx->task_ctx == ctx)
1171                 return;
1172         __perf_counter_sched_in(ctx, cpuctx, cpu);
1173         cpuctx->task_ctx = ctx;
1174 }
1175
1176 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1177 {
1178         struct perf_counter_context *ctx = &cpuctx->ctx;
1179
1180         __perf_counter_sched_in(ctx, cpuctx, cpu);
1181 }
1182
1183 #define MAX_INTERRUPTS (~0ULL)
1184
1185 static void perf_log_throttle(struct perf_counter *counter, int enable);
1186 static void perf_log_period(struct perf_counter *counter, u64 period);
1187
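/*
 * Re-adjust the sample period of counters that use attr.freq:
 * estimate the event rate from the interrupts taken since the last
 * tick and move hw.sample_period halfway towards the period that
 * would give attr.sample_freq.  Counters that hit MAX_INTERRUPTS
 * are logged and unthrottled first.
 */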
1188 static void perf_adjust_freq(struct perf_counter_context *ctx)
1189 {
1190         struct perf_counter *counter;
1191         u64 interrupts, sample_period;
1192         u64 events, period;
1193         s64 delta;
1194
1195         spin_lock(&ctx->lock);
1196         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1197                 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1198                         continue;
1199
1200                 interrupts = counter->hw.interrupts;
1201                 counter->hw.interrupts = 0;
1202
1203                 if (interrupts == MAX_INTERRUPTS) {
1204                         perf_log_throttle(counter, 1);
1205                         counter->pmu->unthrottle(counter);
1206                         interrupts = 2*sysctl_perf_counter_limit/HZ;
1207                 }
1208
1209                 if (!counter->attr.freq || !counter->attr.sample_freq)
1210                         continue;
1211
1212                 events = HZ * interrupts * counter->hw.sample_period;
1213                 period = div64_u64(events, counter->attr.sample_freq);
1214
1215                 delta = (s64)(1 + period - counter->hw.sample_period);
1216                 delta >>= 1;
1217
1218                 sample_period = counter->hw.sample_period + delta;
1219
1220                 if (!sample_period)
1221                         sample_period = 1;
1222
1223                 perf_log_period(counter, sample_period);
1224
1225                 counter->hw.sample_period = sample_period;
1226         }
1227         spin_unlock(&ctx->lock);
1228 }
1229
1230 /*
1231  * Round-robin a context's counters:
1232  */
1233 static void rotate_ctx(struct perf_counter_context *ctx)
1234 {
1235         struct perf_counter *counter;
1236
1237         if (!ctx->nr_counters)
1238                 return;
1239
1240         spin_lock(&ctx->lock);
1241         /*
1242          * Rotate the first entry last (works just fine for group counters too):
1243          */
1244         perf_disable();
1245         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1246                 list_move_tail(&counter->list_entry, &ctx->counter_list);
1247                 break;
1248         }
1249         perf_enable();
1250
1251         spin_unlock(&ctx->lock);
1252 }
1253
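/*
 * Called from the scheduler tick: re-adjust counter frequencies,
 * schedule out both the per-cpu and the task context, rotate their
 * counter lists and schedule them back in, so counters that did not
 * fit on the PMU get their turn.
 */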
1254 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1255 {
1256         struct perf_cpu_context *cpuctx;
1257         struct perf_counter_context *ctx;
1258
1259         if (!atomic_read(&nr_counters))
1260                 return;
1261
1262         cpuctx = &per_cpu(perf_cpu_context, cpu);
1263         ctx = curr->perf_counter_ctxp;
1264
1265         perf_adjust_freq(&cpuctx->ctx);
1266         if (ctx)
1267                 perf_adjust_freq(ctx);
1268
1269         perf_counter_cpu_sched_out(cpuctx);
1270         if (ctx)
1271                 __perf_counter_task_sched_out(ctx);
1272
1273         rotate_ctx(&cpuctx->ctx);
1274         if (ctx)
1275                 rotate_ctx(ctx);
1276
1277         perf_counter_cpu_sched_in(cpuctx, cpu);
1278         if (ctx)
1279                 perf_counter_task_sched_in(curr, cpu);
1280 }
1281
1282 /*
1283  * Cross CPU call to read the hardware counter
1284  */
1285 static void __read(void *info)
1286 {
1287         struct perf_counter *counter = info;
1288         struct perf_counter_context *ctx = counter->ctx;
1289         unsigned long flags;
1290
1291         local_irq_save(flags);
1292         if (ctx->is_active)
1293                 update_context_time(ctx);
1294         counter->pmu->read(counter);
1295         update_counter_times(counter);
1296         local_irq_restore(flags);
1297 }
1298
1299 static u64 perf_counter_read(struct perf_counter *counter)
1300 {
1301         /*
1302          * If counter is enabled and currently active on a CPU, update the
1303          * value in the counter structure:
1304          */
1305         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1306                 smp_call_function_single(counter->oncpu,
1307                                          __read, counter, 1);
1308         } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1309                 update_counter_times(counter);
1310         }
1311
1312         return atomic64_read(&counter->count);
1313 }
1314
1315 /*
1316  * Initialize the perf_counter context in a task_struct:
1317  */
1318 static void
1319 __perf_counter_init_context(struct perf_counter_context *ctx,
1320                             struct task_struct *task)
1321 {
1322         memset(ctx, 0, sizeof(*ctx));
1323         spin_lock_init(&ctx->lock);
1324         mutex_init(&ctx->mutex);
1325         INIT_LIST_HEAD(&ctx->counter_list);
1326         INIT_LIST_HEAD(&ctx->event_list);
1327         atomic_set(&ctx->refcount, 1);
1328         ctx->task = task;
1329 }
1330
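/*
 * Find the context a new counter should attach to: the per-cpu
 * context when a cpu number is given, otherwise the task context of
 * the given pid (allocating one if the task has none yet).  A
 * reference is taken on the returned context.
 */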
1331 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1332 {
1333         struct perf_counter_context *parent_ctx;
1334         struct perf_counter_context *ctx;
1335         struct perf_cpu_context *cpuctx;
1336         struct task_struct *task;
1337         unsigned long flags;
1338         int err;
1339
1340         /*
1341          * If cpu is not a wildcard then this is a percpu counter:
1342          */
1343         if (cpu != -1) {
1344                 /* Must be root to operate on a CPU counter: */
1345                 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1346                         return ERR_PTR(-EACCES);
1347
1348                 if (cpu < 0 || cpu > num_possible_cpus())
1349                         return ERR_PTR(-EINVAL);
1350
1351                 /*
1352                  * We could be clever and allow attaching a counter to an
1353                  * offline CPU and activate it when the CPU comes up, but
1354                  * that's for later.
1355                  */
1356                 if (!cpu_isset(cpu, cpu_online_map))
1357                         return ERR_PTR(-ENODEV);
1358
1359                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1360                 ctx = &cpuctx->ctx;
1361                 get_ctx(ctx);
1362
1363                 return ctx;
1364         }
1365
1366         rcu_read_lock();
1367         if (!pid)
1368                 task = current;
1369         else
1370                 task = find_task_by_vpid(pid);
1371         if (task)
1372                 get_task_struct(task);
1373         rcu_read_unlock();
1374
1375         if (!task)
1376                 return ERR_PTR(-ESRCH);
1377
1378         /*
1379          * Can't attach counters to a dying task.
1380          */
1381         err = -ESRCH;
1382         if (task->flags & PF_EXITING)
1383                 goto errout;
1384
1385         /* Reuse ptrace permission checks for now. */
1386         err = -EACCES;
1387         if (!ptrace_may_access(task, PTRACE_MODE_READ))
1388                 goto errout;
1389
1390  retry:
1391         ctx = perf_lock_task_context(task, &flags);
1392         if (ctx) {
1393                 parent_ctx = ctx->parent_ctx;
1394                 if (parent_ctx) {
1395                         put_ctx(parent_ctx);
1396                         ctx->parent_ctx = NULL;         /* no longer a clone */
1397                 }
1398                 /*
1399                  * Get an extra reference before dropping the lock so that
1400                  * this context won't get freed if the task exits.
1401                  */
1402                 get_ctx(ctx);
1403                 spin_unlock_irqrestore(&ctx->lock, flags);
1404         }
1405
1406         if (!ctx) {
1407                 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1408                 err = -ENOMEM;
1409                 if (!ctx)
1410                         goto errout;
1411                 __perf_counter_init_context(ctx, task);
1412                 get_ctx(ctx);
1413                 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1414                         /*
1415                          * We raced with some other task; use
1416                          * the context they set.
1417                          */
1418                         kfree(ctx);
1419                         goto retry;
1420                 }
1421                 get_task_struct(task);
1422         }
1423
1424         put_task_struct(task);
1425         return ctx;
1426
1427  errout:
1428         put_task_struct(task);
1429         return ERR_PTR(err);
1430 }
1431
1432 static void free_counter_rcu(struct rcu_head *head)
1433 {
1434         struct perf_counter *counter;
1435
1436         counter = container_of(head, struct perf_counter, rcu_head);
1437         if (counter->ns)
1438                 put_pid_ns(counter->ns);
1439         kfree(counter);
1440 }
1441
1442 static void perf_pending_sync(struct perf_counter *counter);
1443
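/*
 * Release a counter: wait for pending work, drop the nr_*_counters
 * statistics, invoke the counter's destroy callback, drop the
 * context reference and free the counter after an RCU grace period.
 */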
1444 static void free_counter(struct perf_counter *counter)
1445 {
1446         perf_pending_sync(counter);
1447
1448         atomic_dec(&nr_counters);
1449         if (counter->attr.mmap)
1450                 atomic_dec(&nr_mmap_counters);
1451         if (counter->attr.munmap)
1452                 atomic_dec(&nr_munmap_counters);
1453         if (counter->attr.comm)
1454                 atomic_dec(&nr_comm_counters);
1455
1456         if (counter->destroy)
1457                 counter->destroy(counter);
1458
1459         put_ctx(counter->ctx);
1460         call_rcu(&counter->rcu_head, free_counter_rcu);
1461 }
1462
1463 /*
1464  * Called when the last reference to the file is gone.
1465  */
1466 static int perf_release(struct inode *inode, struct file *file)
1467 {
1468         struct perf_counter *counter = file->private_data;
1469         struct perf_counter_context *ctx = counter->ctx;
1470
1471         file->private_data = NULL;
1472
1473         WARN_ON_ONCE(ctx->parent_ctx);
1474         mutex_lock(&ctx->mutex);
1475         perf_counter_remove_from_context(counter);
1476         mutex_unlock(&ctx->mutex);
1477
1478         mutex_lock(&counter->owner->perf_counter_mutex);
1479         list_del_init(&counter->owner_entry);
1480         mutex_unlock(&counter->owner->perf_counter_mutex);
1481         put_task_struct(counter->owner);
1482
1483         free_counter(counter);
1484
1485         return 0;
1486 }
1487
1488 /*
1489  * Read the performance counter - simple non blocking version for now
1490  */
1491 static ssize_t
1492 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1493 {
1494         u64 values[3];
1495         int n;
1496
1497         /*
1498          * Return end-of-file for a read on a counter that is in
1499          * error state (i.e. because it was pinned but it couldn't be
1500          * scheduled on to the CPU at some point).
1501          */
1502         if (counter->state == PERF_COUNTER_STATE_ERROR)
1503                 return 0;
1504
1505         WARN_ON_ONCE(counter->ctx->parent_ctx);
1506         mutex_lock(&counter->child_mutex);
1507         values[0] = perf_counter_read(counter);
1508         n = 1;
1509         if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1510                 values[n++] = counter->total_time_enabled +
1511                         atomic64_read(&counter->child_total_time_enabled);
1512         if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1513                 values[n++] = counter->total_time_running +
1514                         atomic64_read(&counter->child_total_time_running);
1515         if (counter->attr.read_format & PERF_FORMAT_ID)
1516                 values[n++] = counter->id;
1517         mutex_unlock(&counter->child_mutex);
1518
1519         if (count < n * sizeof(u64))
1520                 return -EINVAL;
1521         count = n * sizeof(u64);
1522
1523         if (copy_to_user(buf, values, count))
1524                 return -EFAULT;
1525
1526         return count;
1527 }
1528
1529 static ssize_t
1530 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1531 {
1532         struct perf_counter *counter = file->private_data;
1533
1534         return perf_read_hw(counter, buf, count);
1535 }
1536
1537 static unsigned int perf_poll(struct file *file, poll_table *wait)
1538 {
1539         struct perf_counter *counter = file->private_data;
1540         struct perf_mmap_data *data;
1541         unsigned int events = POLLHUP;
1542
1543         rcu_read_lock();
1544         data = rcu_dereference(counter->data);
1545         if (data)
1546                 events = atomic_xchg(&data->poll, 0);
1547         rcu_read_unlock();
1548
1549         poll_wait(file, &counter->waitq, wait);
1550
1551         return events;
1552 }
1553
1554 static void perf_counter_reset(struct perf_counter *counter)
1555 {
1556         (void)perf_counter_read(counter);
1557         atomic64_set(&counter->count, 0);
1558         perf_counter_update_userpage(counter);
1559 }
1560
1561 static void perf_counter_for_each_sibling(struct perf_counter *counter,
1562                                           void (*func)(struct perf_counter *))
1563 {
1564         struct perf_counter_context *ctx = counter->ctx;
1565         struct perf_counter *sibling;
1566
1567         WARN_ON_ONCE(ctx->parent_ctx);
1568         mutex_lock(&ctx->mutex);
1569         counter = counter->group_leader;
1570
1571         func(counter);
1572         list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1573                 func(sibling);
1574         mutex_unlock(&ctx->mutex);
1575 }
1576
1577 /*
1578  * Holding the top-level counter's child_mutex means that any
1579  * descendant process that has inherited this counter will block
1580  * in sync_child_counter if it goes to exit, thus satisfying the
1581  * task existence requirements of perf_counter_enable/disable.
1582  */
1583 static void perf_counter_for_each_child(struct perf_counter *counter,
1584                                         void (*func)(struct perf_counter *))
1585 {
1586         struct perf_counter *child;
1587
1588         WARN_ON_ONCE(counter->ctx->parent_ctx);
1589         mutex_lock(&counter->child_mutex);
1590         func(counter);
1591         list_for_each_entry(child, &counter->child_list, child_list)
1592                 func(child);
1593         mutex_unlock(&counter->child_mutex);
1594 }
1595
1596 static void perf_counter_for_each(struct perf_counter *counter,
1597                                   void (*func)(struct perf_counter *))
1598 {
1599         struct perf_counter *child;
1600
1601         WARN_ON_ONCE(counter->ctx->parent_ctx);
1602         mutex_lock(&counter->child_mutex);
1603         perf_counter_for_each_sibling(counter, func);
1604         list_for_each_entry(child, &counter->child_list, child_list)
1605                 perf_counter_for_each_sibling(child, func);
1606         mutex_unlock(&counter->child_mutex);
1607 }
1608
1609 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1610 {
1611         struct perf_counter_context *ctx = counter->ctx;
1613         int ret = 0;
1614         u64 value;
1615
1616         if (!counter->attr.sample_period)
1617                 return -EINVAL;
1618
1619         if (copy_from_user(&value, arg, sizeof(value)))
1621                 return -EFAULT;
1622
1623         if (!value)
1624                 return -EINVAL;
1625
1626         spin_lock_irq(&ctx->lock);
1627         if (counter->attr.freq) {
1628                 if (value > sysctl_perf_counter_limit) {
1629                         ret = -EINVAL;
1630                         goto unlock;
1631                 }
1632
1633                 counter->attr.sample_freq = value;
1634         } else {
1635                 perf_log_period(counter, value);
1636
1637                 counter->attr.sample_period = value;
1638                 counter->hw.sample_period = value;
1639         }
1640 unlock:
1641         spin_unlock_irq(&ctx->lock);
1642
1643         return ret;
1644 }
1645
1646 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1647 {
1648         struct perf_counter *counter = file->private_data;
1649         void (*func)(struct perf_counter *);
1650         u32 flags = arg;
1651
1652         switch (cmd) {
1653         case PERF_COUNTER_IOC_ENABLE:
1654                 func = perf_counter_enable;
1655                 break;
1656         case PERF_COUNTER_IOC_DISABLE:
1657                 func = perf_counter_disable;
1658                 break;
1659         case PERF_COUNTER_IOC_RESET:
1660                 func = perf_counter_reset;
1661                 break;
1662
1663         case PERF_COUNTER_IOC_REFRESH:
1664                 return perf_counter_refresh(counter, arg);
1665
1666         case PERF_COUNTER_IOC_PERIOD:
1667                 return perf_counter_period(counter, (u64 __user *)arg);
1668
1669         default:
1670                 return -ENOTTY;
1671         }
1672
1673         if (flags & PERF_IOC_FLAG_GROUP)
1674                 perf_counter_for_each(counter, func);
1675         else
1676                 perf_counter_for_each_child(counter, func);
1677
1678         return 0;
1679 }
1680
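/*
 * Illustrative user-space sketch (editorial addition): driving the
 * ioctls above.  PERF_IOC_FLAG_GROUP routes ENABLE/DISABLE/RESET
 * through perf_counter_for_each(), i.e. the whole sibling group;
 * without it only this counter and its inherited children
 * (perf_counter_for_each_child()) are affected:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	// ... run the workload ...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */
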
1681 int perf_counter_task_enable(void)
1682 {
1683         struct perf_counter *counter;
1684
1685         mutex_lock(&current->perf_counter_mutex);
1686         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1687                 perf_counter_for_each_child(counter, perf_counter_enable);
1688         mutex_unlock(&current->perf_counter_mutex);
1689
1690         return 0;
1691 }
1692
1693 int perf_counter_task_disable(void)
1694 {
1695         struct perf_counter *counter;
1696
1697         mutex_lock(&current->perf_counter_mutex);
1698         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1699                 perf_counter_for_each_child(counter, perf_counter_disable);
1700         mutex_unlock(&current->perf_counter_mutex);
1701
1702         return 0;
1703 }
1704
1705 /*
1706  * Callers need to ensure there can be no nesting of this function, otherwise
1707  * the seqlock logic goes bad. We cannot serialize this because the arch
1708  * code calls this from NMI context.
1709  */
1710 void perf_counter_update_userpage(struct perf_counter *counter)
1711 {
1712         struct perf_counter_mmap_page *userpg;
1713         struct perf_mmap_data *data;
1714
1715         rcu_read_lock();
1716         data = rcu_dereference(counter->data);
1717         if (!data)
1718                 goto unlock;
1719
1720         userpg = data->user_page;
1721
1722         /*
1723          * Disable preemption so as to not let the corresponding user-space
1724          * spin too long if we get preempted.
1725          */
1726         preempt_disable();
1727         ++userpg->lock;
1728         barrier();
1729         userpg->index = counter->hw.idx;
1730         userpg->offset = atomic64_read(&counter->count);
1731         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1732                 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1733
1734         barrier();
1735         ++userpg->lock;
1736         preempt_enable();
1737 unlock:
1738         rcu_read_unlock();
1739 }
1740
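/*
 * Illustrative user-space sketch (editorial addition): the retry loop
 * that the ++lock/barrier() pairs above are written against.  ->lock
 * is odd while an update is in flight, and even and unchanged around
 * a consistent snapshot:
 *
 *	do {
 *		seq = pg->lock;
 *		rmb();
 *		idx    = pg->index;
 *		offset = pg->offset;
 *		rmb();
 *	} while (pg->lock != seq || (seq & 1));
 *
 * While the counter is active, offset already has prev_count
 * subtracted, so a self-monitoring task can add a raw hardware read of
 * counter "idx" (where the architecture allows user-space reads) and
 * obtain an up-to-date count without entering the kernel.
 */
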
1741 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1742 {
1743         struct perf_counter *counter = vma->vm_file->private_data;
1744         struct perf_mmap_data *data;
1745         int ret = VM_FAULT_SIGBUS;
1746
1747         rcu_read_lock();
1748         data = rcu_dereference(counter->data);
1749         if (!data)
1750                 goto unlock;
1751
1752         if (vmf->pgoff == 0) {
1753                 vmf->page = virt_to_page(data->user_page);
1754         } else {
1755                 int nr = vmf->pgoff - 1;
1756
1757                 if ((unsigned)nr >= data->nr_pages)
1758                         goto unlock;
1759
1760                 vmf->page = virt_to_page(data->data_pages[nr]);
1761         }
1762         get_page(vmf->page);
1763         ret = 0;
1764 unlock:
1765         rcu_read_unlock();
1766
1767         return ret;
1768 }
1769
1770 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1771 {
1772         struct perf_mmap_data *data;
1773         unsigned long size;
1774         int i;
1775
1776         WARN_ON(atomic_read(&counter->mmap_count));
1777
1778         size = sizeof(struct perf_mmap_data);
1779         size += nr_pages * sizeof(void *);
1780
1781         data = kzalloc(size, GFP_KERNEL);
1782         if (!data)
1783                 goto fail;
1784
1785         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1786         if (!data->user_page)
1787                 goto fail_user_page;
1788
1789         for (i = 0; i < nr_pages; i++) {
1790                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1791                 if (!data->data_pages[i])
1792                         goto fail_data_pages;
1793         }
1794
1795         data->nr_pages = nr_pages;
1796         atomic_set(&data->lock, -1);
1797
1798         rcu_assign_pointer(counter->data, data);
1799
1800         return 0;
1801
1802 fail_data_pages:
1803         for (i--; i >= 0; i--)
1804                 free_page((unsigned long)data->data_pages[i]);
1805
1806         free_page((unsigned long)data->user_page);
1807
1808 fail_user_page:
1809         kfree(data);
1810
1811 fail:
1812         return -ENOMEM;
1813 }
1814
1815 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1816 {
1817         struct perf_mmap_data *data;
1818         int i;
1819
1820         data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
1821
1822         free_page((unsigned long)data->user_page);
1823         for (i = 0; i < data->nr_pages; i++)
1824                 free_page((unsigned long)data->data_pages[i]);
1825         kfree(data);
1826 }
1827
1828 static void perf_mmap_data_free(struct perf_counter *counter)
1829 {
1830         struct perf_mmap_data *data = counter->data;
1831
1832         WARN_ON(atomic_read(&counter->mmap_count));
1833
1834         rcu_assign_pointer(counter->data, NULL);
1835         call_rcu(&data->rcu_head, __perf_mmap_data_free);
1836 }
1837
1838 static void perf_mmap_open(struct vm_area_struct *vma)
1839 {
1840         struct perf_counter *counter = vma->vm_file->private_data;
1841
1842         atomic_inc(&counter->mmap_count);
1843 }
1844
1845 static void perf_mmap_close(struct vm_area_struct *vma)
1846 {
1847         struct perf_counter *counter = vma->vm_file->private_data;
1848
1849         WARN_ON_ONCE(counter->ctx->parent_ctx);
1850         if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
1851                 struct user_struct *user = current_user();
1852
1853                 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1854                 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1855                 perf_mmap_data_free(counter);
1856                 mutex_unlock(&counter->mmap_mutex);
1857         }
1858 }
1859
1860 static struct vm_operations_struct perf_mmap_vmops = {
1861         .open  = perf_mmap_open,
1862         .close = perf_mmap_close,
1863         .fault = perf_mmap_fault,
1864 };
1865
1866 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1867 {
1868         struct perf_counter *counter = file->private_data;
1869         unsigned long user_locked, user_lock_limit;
1870         struct user_struct *user = current_user();
1871         unsigned long locked, lock_limit;
1872         unsigned long vma_size;
1873         unsigned long nr_pages;
1874         long user_extra, extra;
1875         int ret = 0;
1876
1877         if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1878                 return -EINVAL;
1879
1880         vma_size = vma->vm_end - vma->vm_start;
1881         nr_pages = (vma_size / PAGE_SIZE) - 1;
1882
1883         /*
1884          * If we have data pages ensure they're a power-of-two number, so we
1885          * can do bitmasks instead of modulo.
1886          */
1887         if (nr_pages != 0 && !is_power_of_2(nr_pages))
1888                 return -EINVAL;
1889
1890         if (vma_size != PAGE_SIZE * (1 + nr_pages))
1891                 return -EINVAL;
1892
1893         if (vma->vm_pgoff != 0)
1894                 return -EINVAL;
1895
1896         WARN_ON_ONCE(counter->ctx->parent_ctx);
1897         mutex_lock(&counter->mmap_mutex);
1898         if (atomic_inc_not_zero(&counter->mmap_count)) {
1899                 if (nr_pages != counter->data->nr_pages)
1900                         ret = -EINVAL;
1901                 goto unlock;
1902         }
1903
1904         user_extra = nr_pages + 1;
1905         user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1906
1907         /*
1908          * Increase the limit linearly with more CPUs:
1909          */
1910         user_lock_limit *= num_online_cpus();
1911
1912         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1913
1914         extra = 0;
1915         if (user_locked > user_lock_limit)
1916                 extra = user_locked - user_lock_limit;
1917
1918         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1919         lock_limit >>= PAGE_SHIFT;
1920         locked = vma->vm_mm->locked_vm + extra;
1921
1922         if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1923                 ret = -EPERM;
1924                 goto unlock;
1925         }
1926
1927         WARN_ON(counter->data);
1928         ret = perf_mmap_data_alloc(counter, nr_pages);
1929         if (ret)
1930                 goto unlock;
1931
1932         atomic_set(&counter->mmap_count, 1);
1933         atomic_long_add(user_extra, &user->locked_vm);
1934         vma->vm_mm->locked_vm += extra;
1935         counter->data->nr_locked = extra;
1936 unlock:
1937         mutex_unlock(&counter->mmap_mutex);
1938
1939         vma->vm_flags &= ~VM_MAYWRITE;
1940         vma->vm_flags |= VM_RESERVED;
1941         vma->vm_ops = &perf_mmap_vmops;
1942
1943         return ret;
1944 }
1945
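/*
 * Illustrative user-space sketch (editorial addition): the only layout
 * perf_mmap() accepts is one metadata page plus a power-of-two number
 * of data pages, mapped shared and read-only at offset 0:
 *
 *	int n = 3;				// 2^3 = 8 data pages
 *	size_t len = (1 + (1UL << n)) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * A writable mapping, a non-zero pgoff or a non-power-of-two data area
 * gets -EINVAL; the power-of-two rule is what lets perf_output_copy()
 * mask offsets instead of taking a modulo.
 */
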
1946 static int perf_fasync(int fd, struct file *filp, int on)
1947 {
1948         struct inode *inode = filp->f_path.dentry->d_inode;
1949         struct perf_counter *counter = filp->private_data;
1950         int retval;
1951
1952         mutex_lock(&inode->i_mutex);
1953         retval = fasync_helper(fd, filp, on, &counter->fasync);
1954         mutex_unlock(&inode->i_mutex);
1955
1956         if (retval < 0)
1957                 return retval;
1958
1959         return 0;
1960 }
1961
1962 static const struct file_operations perf_fops = {
1963         .release                = perf_release,
1964         .read                   = perf_read,
1965         .poll                   = perf_poll,
1966         .unlocked_ioctl         = perf_ioctl,
1967         .compat_ioctl           = perf_ioctl,
1968         .mmap                   = perf_mmap,
1969         .fasync                 = perf_fasync,
1970 };
1971
1972 /*
1973  * Perf counter wakeup
1974  *
1975  * If there's data, ensure we set the poll() state and publish everything
1976  * to user-space before waking everybody up.
1977  */
1978
1979 void perf_counter_wakeup(struct perf_counter *counter)
1980 {
1981         wake_up_all(&counter->waitq);
1982
1983         if (counter->pending_kill) {
1984                 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1985                 counter->pending_kill = 0;
1986         }
1987 }
1988
1989 /*
1990  * Pending wakeups
1991  *
1992  * Handle the case where we need to wake up from NMI (or rq->lock) context.
1993  *
1994  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1995  * singly-linked list and use cmpxchg() to add entries locklessly.
1996  */
1997
1998 static void perf_pending_counter(struct perf_pending_entry *entry)
1999 {
2000         struct perf_counter *counter = container_of(entry,
2001                         struct perf_counter, pending);
2002
2003         if (counter->pending_disable) {
2004                 counter->pending_disable = 0;
2005                 perf_counter_disable(counter);
2006         }
2007
2008         if (counter->pending_wakeup) {
2009                 counter->pending_wakeup = 0;
2010                 perf_counter_wakeup(counter);
2011         }
2012 }
2013
2014 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2015
2016 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2017         PENDING_TAIL,
2018 };
2019
2020 static void perf_pending_queue(struct perf_pending_entry *entry,
2021                                void (*func)(struct perf_pending_entry *))
2022 {
2023         struct perf_pending_entry **head;
2024
2025         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2026                 return;
2027
2028         entry->func = func;
2029
2030         head = &get_cpu_var(perf_pending_head);
2031
2032         do {
2033                 entry->next = *head;
2034         } while (cmpxchg(head, entry->next, entry) != entry->next);
2035
2036         set_perf_counter_pending();
2037
2038         put_cpu_var(perf_pending_head);
2039 }
2040
2041 static int __perf_pending_run(void)
2042 {
2043         struct perf_pending_entry *list;
2044         int nr = 0;
2045
2046         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2047         while (list != PENDING_TAIL) {
2048                 void (*func)(struct perf_pending_entry *);
2049                 struct perf_pending_entry *entry = list;
2050
2051                 list = list->next;
2052
2053                 func = entry->func;
2054                 entry->next = NULL;
2055                 /*
2056                  * Ensure we observe the unqueue before we issue the wakeup,
2057                  * so that we won't be waiting forever.
2058                  * -- see perf_not_pending().
2059                  */
2060                 smp_wmb();
2061
2062                 func(entry);
2063                 nr++;
2064         }
2065
2066         return nr;
2067 }
2068
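/*
 * Editorial note on the ->next encoding used by the two functions
 * above (a restatement of the code, not new mechanism):
 *
 *	entry->next == NULL          - not queued; the cmpxchg() in
 *	                               perf_pending_queue() claims it
 *	entry->next == PENDING_TAIL  - queued, last entry in the list
 *	entry->next == other entry   - queued, in the middle
 *
 * The list terminates in PENDING_TAIL rather than NULL precisely so
 * that ->next == NULL can unambiguously mean "not queued", which the
 * claiming cmpxchg() relies on.  Draining with xchg() detaches the
 * whole per-cpu list in one atomic step, so entries queued by a
 * racing NMI simply land on the fresh, empty list.
 */
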
2069 static inline int perf_not_pending(struct perf_counter *counter)
2070 {
2071         /*
2072          * If we flush on whatever cpu we run, there is a chance we don't
2073          * need to wait.
2074          */
2075         get_cpu();
2076         __perf_pending_run();
2077         put_cpu();
2078
2079         /*
2080          * Ensure we see the proper queue state before going to sleep
2081          * so that we do not miss the wakeup. -- see __perf_pending_run()
2082          */
2083         smp_rmb();
2084         return counter->pending.next == NULL;
2085 }
2086
2087 static void perf_pending_sync(struct perf_counter *counter)
2088 {
2089         wait_event(counter->waitq, perf_not_pending(counter));
2090 }
2091
2092 void perf_counter_do_pending(void)
2093 {
2094         __perf_pending_run();
2095 }
2096
2097 /*
2098  * Callchain support -- arch specific
2099  */
2100
2101 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2102 {
2103         return NULL;
2104 }
2105
2106 /*
2107  * Output
2108  */
2109
2110 struct perf_output_handle {
2111         struct perf_counter     *counter;
2112         struct perf_mmap_data   *data;
2113         unsigned long           head;
2114         unsigned long           offset;
2115         int                     nmi;
2116         int                     overflow;
2117         int                     locked;
2118         unsigned long           flags;
2119 };
2120
2121 static void perf_output_wakeup(struct perf_output_handle *handle)
2122 {
2123         atomic_set(&handle->data->poll, POLLIN);
2124
2125         if (handle->nmi) {
2126                 handle->counter->pending_wakeup = 1;
2127                 perf_pending_queue(&handle->counter->pending,
2128                                    perf_pending_counter);
2129         } else
2130                 perf_counter_wakeup(handle->counter);
2131 }
2132
2133 /*
2134  * Curious locking construct.
2135  *
2136  * We need to ensure a later event doesn't publish a head when a former
2137  * event isn't done writing. However since we need to deal with NMIs we
2138  * cannot fully serialize things.
2139  *
2140  * What we do is serialize between CPUs so we only have to deal with NMI
2141  * nesting on a single CPU.
2142  *
2143  * We only publish the head (and generate a wakeup) when the outer-most
2144  * event completes.
2145  */
2146 static void perf_output_lock(struct perf_output_handle *handle)
2147 {
2148         struct perf_mmap_data *data = handle->data;
2149         int cpu;
2150
2151         handle->locked = 0;
2152
2153         local_irq_save(handle->flags);
2154         cpu = smp_processor_id();
2155
2156         if (in_nmi() && atomic_read(&data->lock) == cpu)
2157                 return;
2158
2159         while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2160                 cpu_relax();
2161
2162         handle->locked = 1;
2163 }
2164
2165 static void perf_output_unlock(struct perf_output_handle *handle)
2166 {
2167         struct perf_mmap_data *data = handle->data;
2168         unsigned long head;
2169         int cpu;
2170
2171         data->done_head = data->head;
2172
2173         if (!handle->locked)
2174                 goto out;
2175
2176 again:
2177         /*
2178          * The xchg implies a full barrier that ensures all writes are done
2179          * before we publish the new head, matched by a rmb() in userspace when
2180          * reading this position.
2181          */
2182         while ((head = atomic_long_xchg(&data->done_head, 0)))
2183                 data->user_page->data_head = head;
2184
2185         /*
2186          * NMI can happen here, which means we can miss a done_head update.
2187          */
2188
2189         cpu = atomic_xchg(&data->lock, -1);
2190         WARN_ON_ONCE(cpu != smp_processor_id());
2191
2192         /*
2193          * Therefore we have to check that we did not miss one.
2194          */
2195         if (unlikely(atomic_long_read(&data->done_head))) {
2196                 /*
2197                  * Since we had it locked, we can lock it again.
2198                  */
2199                 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2200                         cpu_relax();
2201
2202                 goto again;
2203         }
2204
2205         if (atomic_xchg(&data->wakeup, 0))
2206                 perf_output_wakeup(handle);
2207 out:
2208         local_irq_restore(handle->flags);
2209 }
2210
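/*
 * Illustrative user-space sketch (editorial addition): the consumer
 * side of the data_head publication above, with the rmb() that pairs
 * with the barrier implied by the xchg() on ->done_head (wrap-around
 * handling elided):
 *
 *	u64 head = pg->data_head;
 *	rmb();
 *	while (tail != head) {
 *		struct perf_event_header *hdr = data + (tail & mask);
 *		// ... consume hdr->size bytes ...
 *		tail += hdr->size;
 *	}
 *
 * The mapping is read-only, so there is no tail handshake back to the
 * kernel in this version; a consumer that falls behind is simply
 * overwritten.
 */
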
2211 static int perf_output_begin(struct perf_output_handle *handle,
2212                              struct perf_counter *counter, unsigned int size,
2213                              int nmi, int overflow)
2214 {
2215         struct perf_mmap_data *data;
2216         unsigned int offset, head;
2217
2218         /*
2219          * For inherited counters we send all the output towards the parent.
2220          */
2221         if (counter->parent)
2222                 counter = counter->parent;
2223
2224         rcu_read_lock();
2225         data = rcu_dereference(counter->data);
2226         if (!data)
2227                 goto out;
2228
2229         handle->data     = data;
2230         handle->counter  = counter;
2231         handle->nmi      = nmi;
2232         handle->overflow = overflow;
2233
2234         if (!data->nr_pages)
2235                 goto fail;
2236
2237         perf_output_lock(handle);
2238
2239         do {
2240                 offset = head = atomic_long_read(&data->head);
2241                 head += size;
2242         } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2243
2244         handle->offset  = offset;
2245         handle->head    = head;
2246
2247         if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2248                 atomic_set(&data->wakeup, 1);
2249
2250         return 0;
2251
2252 fail:
2253         perf_output_wakeup(handle);
2254 out:
2255         rcu_read_unlock();
2256
2257         return -ENOSPC;
2258 }
2259
2260 static void perf_output_copy(struct perf_output_handle *handle,
2261                              void *buf, unsigned int len)
2262 {
2263         unsigned int pages_mask;
2264         unsigned int offset;
2265         unsigned int size;
2266         void **pages;
2267
2268         offset          = handle->offset;
2269         pages_mask      = handle->data->nr_pages - 1;
2270         pages           = handle->data->data_pages;
2271
2272         do {
2273                 unsigned int page_offset;
2274                 int nr;
2275
2276                 nr          = (offset >> PAGE_SHIFT) & pages_mask;
2277                 page_offset = offset & (PAGE_SIZE - 1);
2278                 size        = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2279
2280                 memcpy(pages[nr] + page_offset, buf, size);
2281
2282                 len         -= size;
2283                 buf         += size;
2284                 offset      += size;
2285         } while (len);
2286
2287         handle->offset = offset;
2288
2289         /*
2290          * Check we didn't copy past our reservation window, taking the
2291          * possible unsigned int wrap into account.
2292          */
2293         WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2294 }
2295
2296 #define perf_output_put(handle, x) \
2297         perf_output_copy((handle), &(x), sizeof(x))
2298
2299 static void perf_output_end(struct perf_output_handle *handle)
2300 {
2301         struct perf_counter *counter = handle->counter;
2302         struct perf_mmap_data *data = handle->data;
2303
2304         int wakeup_events = counter->attr.wakeup_events;
2305
2306         if (handle->overflow && wakeup_events) {
2307                 int events = atomic_inc_return(&data->events);
2308                 if (events >= wakeup_events) {
2309                         atomic_sub(wakeup_events, &data->events);
2310                         atomic_set(&data->wakeup, 1);
2311                 }
2312         }
2313
2314         perf_output_unlock(handle);
2315         rcu_read_unlock();
2316 }
2317
2318 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2319 {
2320         /*
2321          * only top level counters have the pid namespace they were created in
2322          */
2323         if (counter->parent)
2324                 counter = counter->parent;
2325
2326         return task_tgid_nr_ns(p, counter->ns);
2327 }
2328
2329 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2330 {
2331         /*
2332          * only top level counters have the pid namespace they were created in
2333          */
2334         if (counter->parent)
2335                 counter = counter->parent;
2336
2337         return task_pid_nr_ns(p, counter->ns);
2338 }
2339
2340 static void perf_counter_output(struct perf_counter *counter,
2341                                 int nmi, struct pt_regs *regs, u64 addr)
2342 {
2343         int ret;
2344         u64 sample_type = counter->attr.sample_type;
2345         struct perf_output_handle handle;
2346         struct perf_event_header header;
2347         u64 ip;
2348         struct {
2349                 u32 pid, tid;
2350         } tid_entry;
2351         struct {
2352                 u64 id;
2353                 u64 counter;
2354         } group_entry;
2355         struct perf_callchain_entry *callchain = NULL;
2356         int callchain_size = 0;
2357         u64 time;
2358         struct {
2359                 u32 cpu, reserved;
2360         } cpu_entry;
2361
2362         header.type = 0;
2363         header.size = sizeof(header);
2364
2365         header.misc = PERF_EVENT_MISC_OVERFLOW;
2366         header.misc |= perf_misc_flags(regs);
2367
2368         if (sample_type & PERF_SAMPLE_IP) {
2369                 ip = perf_instruction_pointer(regs);
2370                 header.type |= PERF_SAMPLE_IP;
2371                 header.size += sizeof(ip);
2372         }
2373
2374         if (sample_type & PERF_SAMPLE_TID) {
2375                 /* namespace issues */
2376                 tid_entry.pid = perf_counter_pid(counter, current);
2377                 tid_entry.tid = perf_counter_tid(counter, current);
2378
2379                 header.type |= PERF_SAMPLE_TID;
2380                 header.size += sizeof(tid_entry);
2381         }
2382
2383         if (sample_type & PERF_SAMPLE_TIME) {
2384                 /*
2385                  * Maybe do better on x86 and provide cpu_clock_nmi()
2386                  */
2387                 time = sched_clock();
2388
2389                 header.type |= PERF_SAMPLE_TIME;
2390                 header.size += sizeof(u64);
2391         }
2392
2393         if (sample_type & PERF_SAMPLE_ADDR) {
2394                 header.type |= PERF_SAMPLE_ADDR;
2395                 header.size += sizeof(u64);
2396         }
2397
2398         if (sample_type & PERF_SAMPLE_CONFIG) {
2399                 header.type |= PERF_SAMPLE_CONFIG;
2400                 header.size += sizeof(u64);
2401         }
2402
2403         if (sample_type & PERF_SAMPLE_CPU) {
2404                 header.type |= PERF_SAMPLE_CPU;
2405                 header.size += sizeof(cpu_entry);
2406
2407                 cpu_entry.cpu = raw_smp_processor_id();
2408         }
2409
2410         if (sample_type & PERF_SAMPLE_GROUP) {
2411                 header.type |= PERF_SAMPLE_GROUP;
2412                 header.size += sizeof(u64) +
2413                         counter->nr_siblings * sizeof(group_entry);
2414         }
2415
2416         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2417                 callchain = perf_callchain(regs);
2418
2419                 if (callchain) {
2420                         callchain_size = (1 + callchain->nr) * sizeof(u64);
2421
2422                         header.type |= PERF_SAMPLE_CALLCHAIN;
2423                         header.size += callchain_size;
2424                 }
2425         }
2426
2427         ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2428         if (ret)
2429                 return;
2430
2431         perf_output_put(&handle, header);
2432
2433         if (sample_type & PERF_SAMPLE_IP)
2434                 perf_output_put(&handle, ip);
2435
2436         if (sample_type & PERF_SAMPLE_TID)
2437                 perf_output_put(&handle, tid_entry);
2438
2439         if (sample_type & PERF_SAMPLE_TIME)
2440                 perf_output_put(&handle, time);
2441
2442         if (sample_type & PERF_SAMPLE_ADDR)
2443                 perf_output_put(&handle, addr);
2444
2445         if (sample_type & PERF_SAMPLE_CONFIG)
2446                 perf_output_put(&handle, counter->attr.config);
2447
2448         if (sample_type & PERF_SAMPLE_CPU)
2449                 perf_output_put(&handle, cpu_entry);
2450
2451         /*
2452          * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2453          */
2454         if (sample_type & PERF_SAMPLE_GROUP) {
2455                 struct perf_counter *leader, *sub;
2456                 u64 nr = counter->nr_siblings;
2457
2458                 perf_output_put(&handle, nr);
2459
2460                 leader = counter->group_leader;
2461                 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2462                         if (sub != counter)
2463                                 sub->pmu->read(sub);
2464
2465                         group_entry.id = sub->id;
2466                         group_entry.counter = atomic64_read(&sub->count);
2467
2468                         perf_output_put(&handle, group_entry);
2469                 }
2470         }
2471
2472         if (callchain)
2473                 perf_output_copy(&handle, callchain, callchain_size);
2474
2475         perf_output_end(&handle);
2476 }
2477
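/*
 * Illustrative user-space sketch (editorial addition): decoding an
 * overflow sample.  In this ABI revision the sample bits live in
 * header.type (header.misc carries PERF_EVENT_MISC_OVERFLOW), and a
 * reader must test them in exactly the order they were written above:
 *
 *	u64 *p = (u64 *)(hdr + 1);
 *
 *	if (hdr->type & PERF_SAMPLE_IP)   ip      = *p++;
 *	if (hdr->type & PERF_SAMPLE_TID)  pid_tid = *p++;  // two u32s
 *	if (hdr->type & PERF_SAMPLE_TIME) time    = *p++;
 *	if (hdr->type & PERF_SAMPLE_ADDR) addr    = *p++;
 *	// CONFIG, CPU, GROUP and CALLCHAIN follow in that order
 *
 * Every field is a u64 or padded to u64 size, which is why the cursor
 * can step in u64 units.
 */
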
2478 /*
2479  * fork tracking
2480  */
2481
2482 struct perf_fork_event {
2483         struct task_struct      *task;
2484
2485         struct {
2486                 struct perf_event_header        header;
2487
2488                 u32                             pid;
2489                 u32                             ppid;
2490         } event;
2491 };
2492
2493 static void perf_counter_fork_output(struct perf_counter *counter,
2494                                      struct perf_fork_event *fork_event)
2495 {
2496         struct perf_output_handle handle;
2497         int size = fork_event->event.header.size;
2498         struct task_struct *task = fork_event->task;
2499         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2500
2501         if (ret)
2502                 return;
2503
2504         fork_event->event.pid = perf_counter_pid(counter, task);
2505         fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2506
2507         perf_output_put(&handle, fork_event->event);
2508         perf_output_end(&handle);
2509 }
2510
2511 static int perf_counter_fork_match(struct perf_counter *counter)
2512 {
2513         if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap)
2514                 return 1;
2515
2516         return 0;
2517 }
2518
2519 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2520                                   struct perf_fork_event *fork_event)
2521 {
2522         struct perf_counter *counter;
2523
2524         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2525                 return;
2526
2527         rcu_read_lock();
2528         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2529                 if (perf_counter_fork_match(counter))
2530                         perf_counter_fork_output(counter, fork_event);
2531         }
2532         rcu_read_unlock();
2533 }
2534
2535 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2536 {
2537         struct perf_cpu_context *cpuctx;
2538         struct perf_counter_context *ctx;
2539
2540         cpuctx = &get_cpu_var(perf_cpu_context);
2541         perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2542         put_cpu_var(perf_cpu_context);
2543
2544         rcu_read_lock();
2545         /*
2546          * It doesn't really matter which of the child contexts the
2547          * event ends up in.
2548          */
2549         ctx = rcu_dereference(current->perf_counter_ctxp);
2550         if (ctx)
2551                 perf_counter_fork_ctx(ctx, fork_event);
2552         rcu_read_unlock();
2553 }
2554
2555 void perf_counter_fork(struct task_struct *task)
2556 {
2557         struct perf_fork_event fork_event;
2558
2559         if (!atomic_read(&nr_comm_counters) &&
2560             !atomic_read(&nr_mmap_counters) &&
2561             !atomic_read(&nr_munmap_counters))
2562                 return;
2563
2564         fork_event = (struct perf_fork_event){
2565                 .task   = task,
2566                 .event  = {
2567                         .header = {
2568                                 .type = PERF_EVENT_FORK,
2569                                 .size = sizeof(fork_event.event),
2570                         },
2571                 },
2572         };
2573
2574         perf_counter_fork_event(&fork_event);
2575 }
2576
2577 /*
2578  * comm tracking
2579  */
2580
2581 struct perf_comm_event {
2582         struct task_struct      *task;
2583         char                    *comm;
2584         int                     comm_size;
2585
2586         struct {
2587                 struct perf_event_header        header;
2588
2589                 u32                             pid;
2590                 u32                             tid;
2591         } event;
2592 };
2593
2594 static void perf_counter_comm_output(struct perf_counter *counter,
2595                                      struct perf_comm_event *comm_event)
2596 {
2597         struct perf_output_handle handle;
2598         int size = comm_event->event.header.size;
2599         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2600
2601         if (ret)
2602                 return;
2603
2604         comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2605         comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2606
2607         perf_output_put(&handle, comm_event->event);
2608         perf_output_copy(&handle, comm_event->comm,
2609                                    comm_event->comm_size);
2610         perf_output_end(&handle);
2611 }
2612
2613 static int perf_counter_comm_match(struct perf_counter *counter)
2614 {
2615         if (counter->attr.comm)
2616                 return 1;
2617
2618         return 0;
2619 }
2620
2621 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2622                                   struct perf_comm_event *comm_event)
2623 {
2624         struct perf_counter *counter;
2625
2626         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2627                 return;
2628
2629         rcu_read_lock();
2630         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2631                 if (perf_counter_comm_match(counter))
2632                         perf_counter_comm_output(counter, comm_event);
2633         }
2634         rcu_read_unlock();
2635 }
2636
2637 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2638 {
2639         struct perf_cpu_context *cpuctx;
2640         struct perf_counter_context *ctx;
2641         unsigned int size;
2642         char *comm = comm_event->task->comm;
2643
2644         size = ALIGN(strlen(comm)+1, sizeof(u64));
2645
2646         comm_event->comm = comm;
2647         comm_event->comm_size = size;
2648
2649         comm_event->event.header.size = sizeof(comm_event->event) + size;
2650
2651         cpuctx = &get_cpu_var(perf_cpu_context);
2652         perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2653         put_cpu_var(perf_cpu_context);
2654
2655         rcu_read_lock();
2656         /*
2657          * It doesn't really matter which of the child contexts the
2658          * event ends up in.
2659          */
2660         ctx = rcu_dereference(current->perf_counter_ctxp);
2661         if (ctx)
2662                 perf_counter_comm_ctx(ctx, comm_event);
2663         rcu_read_unlock();
2664 }
2665
2666 void perf_counter_comm(struct task_struct *task)
2667 {
2668         struct perf_comm_event comm_event;
2669
2670         if (!atomic_read(&nr_comm_counters))
2671                 return;
2672
2673         comm_event = (struct perf_comm_event){
2674                 .task   = task,
2675                 .event  = {
2676                         .header = { .type = PERF_EVENT_COMM, },
2677                 },
2678         };
2679
2680         perf_counter_comm_event(&comm_event);
2681 }
2682
2683 /*
2684  * mmap tracking
2685  */
2686
2687 struct perf_mmap_event {
2688         struct file     *file;
2689         char            *file_name;
2690         int             file_size;
2691
2692         struct {
2693                 struct perf_event_header        header;
2694
2695                 u32                             pid;
2696                 u32                             tid;
2697                 u64                             start;
2698                 u64                             len;
2699                 u64                             pgoff;
2700         } event;
2701 };
2702
2703 static void perf_counter_mmap_output(struct perf_counter *counter,
2704                                      struct perf_mmap_event *mmap_event)
2705 {
2706         struct perf_output_handle handle;
2707         int size = mmap_event->event.header.size;
2708         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2709
2710         if (ret)
2711                 return;
2712
2713         mmap_event->event.pid = perf_counter_pid(counter, current);
2714         mmap_event->event.tid = perf_counter_tid(counter, current);
2715
2716         perf_output_put(&handle, mmap_event->event);
2717         perf_output_copy(&handle, mmap_event->file_name,
2718                                    mmap_event->file_size);
2719         perf_output_end(&handle);
2720 }
2721
2722 static int perf_counter_mmap_match(struct perf_counter *counter,
2723                                    struct perf_mmap_event *mmap_event)
2724 {
2725         if (counter->attr.mmap &&
2726             mmap_event->event.header.type == PERF_EVENT_MMAP)
2727                 return 1;
2728
2729         if (counter->attr.munmap &&
2730             mmap_event->event.header.type == PERF_EVENT_MUNMAP)
2731                 return 1;
2732
2733         return 0;
2734 }
2735
2736 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2737                                   struct perf_mmap_event *mmap_event)
2738 {
2739         struct perf_counter *counter;
2740
2741         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2742                 return;
2743
2744         rcu_read_lock();
2745         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2746                 if (perf_counter_mmap_match(counter, mmap_event))
2747                         perf_counter_mmap_output(counter, mmap_event);
2748         }
2749         rcu_read_unlock();
2750 }
2751
2752 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2753 {
2754         struct perf_cpu_context *cpuctx;
2755         struct perf_counter_context *ctx;
2756         struct file *file = mmap_event->file;
2757         unsigned int size;
2758         char tmp[16];
2759         char *buf = NULL;
2760         char *name;
2761
2762         if (file) {
2763                 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2764                 if (!buf) {
2765                         name = strncpy(tmp, "//enomem", sizeof(tmp));
2766                         goto got_name;
2767                 }
2768                 name = d_path(&file->f_path, buf, PATH_MAX);
2769                 if (IS_ERR(name)) {
2770                         name = strncpy(tmp, "//toolong", sizeof(tmp));
2771                         goto got_name;
2772                 }
2773         } else {
2774                 name = strncpy(tmp, "//anon", sizeof(tmp));
2775                 goto got_name;
2776         }
2777
2778 got_name:
2779         size = ALIGN(strlen(name)+1, sizeof(u64));
2780
2781         mmap_event->file_name = name;
2782         mmap_event->file_size = size;
2783
2784         mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2785
2786         cpuctx = &get_cpu_var(perf_cpu_context);
2787         perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2788         put_cpu_var(perf_cpu_context);
2789
2790         rcu_read_lock();
2791         /*
2792          * It doesn't really matter which of the child contexts the
2793          * event ends up in.
2794          */
2795         ctx = rcu_dereference(current->perf_counter_ctxp);
2796         if (ctx)
2797                 perf_counter_mmap_ctx(ctx, mmap_event);
2798         rcu_read_unlock();
2799
2800         kfree(buf);
2801 }
2802
2803 void perf_counter_mmap(unsigned long addr, unsigned long len,
2804                        unsigned long pgoff, struct file *file)
2805 {
2806         struct perf_mmap_event mmap_event;
2807
2808         if (!atomic_read(&nr_mmap_counters))
2809                 return;
2810
2811         mmap_event = (struct perf_mmap_event){
2812                 .file   = file,
2813                 .event  = {
2814                         .header = { .type = PERF_EVENT_MMAP, },
2815                         .start  = addr,
2816                         .len    = len,
2817                         .pgoff  = pgoff,
2818                 },
2819         };
2820
2821         perf_counter_mmap_event(&mmap_event);
2822 }
2823
2824 void perf_counter_munmap(unsigned long addr, unsigned long len,
2825                          unsigned long pgoff, struct file *file)
2826 {
2827         struct perf_mmap_event mmap_event;
2828
2829         if (!atomic_read(&nr_munmap_counters))
2830                 return;
2831
2832         mmap_event = (struct perf_mmap_event){
2833                 .file   = file,
2834                 .event  = {
2835                         .header = { .type = PERF_EVENT_MUNMAP, },
2836                         .start  = addr,
2837                         .len    = len,
2838                         .pgoff  = pgoff,
2839                 },
2840         };
2841
2842         perf_counter_mmap_event(&mmap_event);
2843 }
2844
2845 /*
2846  * Log sample_period changes so that analyzing tools can re-normalize the
2847  * event flow.
2848  */
2849
2850 static void perf_log_period(struct perf_counter *counter, u64 period)
2851 {
2852         struct perf_output_handle handle;
2853         int ret;
2854
2855         struct {
2856                 struct perf_event_header        header;
2857                 u64                             time;
2858                 u64                             period;
2859         } freq_event = {
2860                 .header = {
2861                         .type = PERF_EVENT_PERIOD,
2862                         .misc = 0,
2863                         .size = sizeof(freq_event),
2864                 },
2865                 .time = sched_clock(),
2866                 .period = period,
2867         };
2868
2869         if (counter->hw.sample_period == period)
2870                 return;
2871
2872         ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
2873         if (ret)
2874                 return;
2875
2876         perf_output_put(&handle, freq_event);
2877         perf_output_end(&handle);
2878 }
2879
2880 /*
2881  * IRQ throttle logging
2882  */
2883
2884 static void perf_log_throttle(struct perf_counter *counter, int enable)
2885 {
2886         struct perf_output_handle handle;
2887         int ret;
2888
2889         struct {
2890                 struct perf_event_header        header;
2891                 u64                             time;
2892         } throttle_event = {
2893                 .header = {
2894                         .type = PERF_EVENT_THROTTLE + enable,
2895                         .misc = 0,
2896                         .size = sizeof(throttle_event),
2897                 },
2898                 .time = sched_clock(),
2899         };
2900
2901         ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
2902         if (ret)
2903                 return;
2904
2905         perf_output_put(&handle, throttle_event);
2906         perf_output_end(&handle);
2907 }
2908
2909 /*
2910  * Generic counter overflow handling.
2911  */
2912
2913 int perf_counter_overflow(struct perf_counter *counter,
2914                           int nmi, struct pt_regs *regs, u64 addr)
2915 {
2916         int events = atomic_read(&counter->event_limit);
2917         int throttle = counter->pmu->unthrottle != NULL;
2918         int ret = 0;
2919
2920         if (!throttle) {
2921                 counter->hw.interrupts++;
2922         } else {
2923                 if (counter->hw.interrupts != MAX_INTERRUPTS) {
2924                         counter->hw.interrupts++;
2925                         if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
2926                                 counter->hw.interrupts = MAX_INTERRUPTS;
2927                                 perf_log_throttle(counter, 0);
2928                                 ret = 1;
2929                         }
2930                 } else {
2931                         /*
2932                          * Keep re-disabling counters even though on the previous
2933                          * Keep re-disabling the counter even though we disabled
2934                          * it on the previous pass - just in case we raced with a
2935                          */
2936                         ret = 1;
2937                 }
2938         }
2939
2940         /*
2941          * XXX event_limit might not quite work as expected on inherited
2942          * counters
2943          */
2944
2945         counter->pending_kill = POLL_IN;
2946         if (events && atomic_dec_and_test(&counter->event_limit)) {
2947                 ret = 1;
2948                 counter->pending_kill = POLL_HUP;
2949                 if (nmi) {
2950                         counter->pending_disable = 1;
2951                         perf_pending_queue(&counter->pending,
2952                                            perf_pending_counter);
2953                 } else
2954                         perf_counter_disable(counter);
2955         }
2956
2957         perf_counter_output(counter, nmi, regs, addr);
2958         return ret;
2959 }
2960
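/*
 * Editorial worked example for the throttle test above: with HZ=1000
 * and the default sysctl_perf_counter_limit of 100000 NMIs/second, a
 * counter is throttled on its 101st interrupt within one tick
 * (101 * 1000 > 100000).  It then sticks at MAX_INTERRUPTS and keeps
 * returning 1 (so the arch code keeps it stopped) until the
 * tick-driven unthrottle path, outside this excerpt, resets
 * hw.interrupts and logs the matching unthrottle event.
 */
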
2961 /*
2962  * Generic software counter infrastructure
2963  */
2964
2965 static void perf_swcounter_update(struct perf_counter *counter)
2966 {
2967         struct hw_perf_counter *hwc = &counter->hw;
2968         u64 prev, now;
2969         s64 delta;
2970
2971 again:
2972         prev = atomic64_read(&hwc->prev_count);
2973         now = atomic64_read(&hwc->count);
2974         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2975                 goto again;
2976
2977         delta = now - prev;
2978
2979         atomic64_add(delta, &counter->count);
2980         atomic64_sub(delta, &hwc->period_left);
2981 }
2982
2983 static void perf_swcounter_set_period(struct perf_counter *counter)
2984 {
2985         struct hw_perf_counter *hwc = &counter->hw;
2986         s64 left = atomic64_read(&hwc->period_left);
2987         s64 period = hwc->sample_period;
2988
2989         if (unlikely(left <= -period)) {
2990                 left = period;
2991                 atomic64_set(&hwc->period_left, left);
2992         }
2993
2994         if (unlikely(left <= 0)) {
2995                 left += period;
2996                 atomic64_add(period, &hwc->period_left);
2997         }
2998
2999         atomic64_set(&hwc->prev_count, -left);
3000         atomic64_set(&hwc->count, -left);
3001 }
3002
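/*
 * Editorial worked example: with sample_period = 100 and period_left
 * = 0 on entry, the second branch leaves period_left = 100 and starts
 * prev_count and count both at -100.  perf_swcounter_add() then uses
 * atomic64_add_negative(), so the overflow path fires exactly when the
 * running count crosses zero, i.e. after 100 more events - no explicit
 * comparison against the period is needed on the fast path.
 */
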
3003 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3004 {
3005         enum hrtimer_restart ret = HRTIMER_RESTART;
3006         struct perf_counter *counter;
3007         struct pt_regs *regs;
3008         u64 period;
3009
3010         counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3011         counter->pmu->read(counter);
3012
3013         regs = get_irq_regs();
3014         /*
3015          * In case we exclude kernel IPs or are somehow not in interrupt
3016          * context, provide the next best thing, the user IP.
3017          */
3018         if ((counter->attr.exclude_kernel || !regs) &&
3019                         !counter->attr.exclude_user)
3020                 regs = task_pt_regs(current);
3021
3022         if (regs) {
3023                 if (perf_counter_overflow(counter, 0, regs, 0))
3024                         ret = HRTIMER_NORESTART;
3025         }
3026
3027         period = max_t(u64, 10000, counter->hw.sample_period);
3028         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3029
3030         return ret;
3031 }
3032
3033 static void perf_swcounter_overflow(struct perf_counter *counter,
3034                                     int nmi, struct pt_regs *regs, u64 addr)
3035 {
3036         perf_swcounter_update(counter);
3037         perf_swcounter_set_period(counter);
3038         if (perf_counter_overflow(counter, nmi, regs, addr))
3039                 /* soft-disable the counter */
3040                 ;
3042 }
3043
3044 static int perf_swcounter_is_counting(struct perf_counter *counter)
3045 {
3046         struct perf_counter_context *ctx;
3047         unsigned long flags;
3048         int count;
3049
3050         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3051                 return 1;
3052
3053         if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3054                 return 0;
3055
3056         /*
3057          * If the counter is inactive, it could be just because
3058          * its task is scheduled out, or because it's in a group
3059          * which could not go on the PMU.  We want to count in
3060          * the first case but not the second.  If the context is
3061          * currently active then an inactive software counter must
3062          * be the second case.  If it's not currently active then
3063          * we need to know whether the counter was active when the
3064          * context was last active, which we can determine by
3065          * comparing counter->tstamp_stopped with ctx->time.
3066          *
3067          * We are within an RCU read-side critical section,
3068          * which protects the existence of *ctx.
3069          */
3070         ctx = counter->ctx;
3071         spin_lock_irqsave(&ctx->lock, flags);
3072         count = 1;
3073         /* Re-check state now we have the lock */
3074         if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3075             counter->ctx->is_active ||
3076             counter->tstamp_stopped < ctx->time)
3077                 count = 0;
3078         spin_unlock_irqrestore(&ctx->lock, flags);
3079         return count;
3080 }
3081
3082 static int perf_swcounter_match(struct perf_counter *counter,
3083                                 enum perf_event_types type,
3084                                 u32 event, struct pt_regs *regs)
3085 {
3086         u64 event_config;
3087
3088         event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;
3089
3090         if (!perf_swcounter_is_counting(counter))
3091                 return 0;
3092
3093         if (counter->attr.config != event_config)
3094                 return 0;
3095
3096         if (regs) {
3097                 if (counter->attr.exclude_user && user_mode(regs))
3098                         return 0;
3099
3100                 if (counter->attr.exclude_kernel && !user_mode(regs))
3101                         return 0;
3102         }
3103
3104         return 1;
3105 }
3106
3107 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3108                                int nmi, struct pt_regs *regs, u64 addr)
3109 {
3110         int neg = atomic64_add_negative(nr, &counter->hw.count);
3111
3112         if (counter->hw.sample_period && !neg && regs)
3113                 perf_swcounter_overflow(counter, nmi, regs, addr);
3114 }
3115
3116 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3117                                      enum perf_event_types type, u32 event,
3118                                      u64 nr, int nmi, struct pt_regs *regs,
3119                                      u64 addr)
3120 {
3121         struct perf_counter *counter;
3122
3123         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3124                 return;
3125
3126         rcu_read_lock();
3127         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3128                 if (perf_swcounter_match(counter, type, event, regs))
3129                         perf_swcounter_add(counter, nr, nmi, regs, addr);
3130         }
3131         rcu_read_unlock();
3132 }
3133
3134 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3135 {
3136         if (in_nmi())
3137                 return &cpuctx->recursion[3];
3138
3139         if (in_irq())
3140                 return &cpuctx->recursion[2];
3141
3142         if (in_softirq())
3143                 return &cpuctx->recursion[1];
3144
3145         return &cpuctx->recursion[0];
3146 }
3147
3148 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
3149                                    u64 nr, int nmi, struct pt_regs *regs,
3150                                    u64 addr)
3151 {
3152         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3153         int *recursion = perf_swcounter_recursion_context(cpuctx);
3154         struct perf_counter_context *ctx;
3155
3156         if (*recursion)
3157                 goto out;
3158
3159         (*recursion)++;
3160         barrier();
3161
3162         perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3163                                  nr, nmi, regs, addr);
3164         rcu_read_lock();
3165         /*
3166          * It doesn't really matter which of the child contexts the
3167          * event ends up in.
3168          */
3169         ctx = rcu_dereference(current->perf_counter_ctxp);
3170         if (ctx)
3171                 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
3172         rcu_read_unlock();
3173
3174         barrier();
3175         (*recursion)--;
3176
3177 out:
3178         put_cpu_var(perf_cpu_context);
3179 }
3180
3181 void
3182 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
3183 {
3184         __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
3185 }
3186
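/*
 * Illustrative call site (editorial addition): this is the hook the
 * rest of the kernel uses to feed software events.  The page-fault
 * path, for instance, does roughly:
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);
 *
 * nr is the increment, nmi=0 because fault context may issue the
 * wakeup directly, and regs/addr end up in the overflow sample once
 * the sample_period expires.
 */
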
3187 static void perf_swcounter_read(struct perf_counter *counter)
3188 {
3189         perf_swcounter_update(counter);
3190 }
3191
3192 static int perf_swcounter_enable(struct perf_counter *counter)
3193 {
3194         perf_swcounter_set_period(counter);
3195         return 0;
3196 }
3197
3198 static void perf_swcounter_disable(struct perf_counter *counter)
3199 {
3200         perf_swcounter_update(counter);
3201 }
3202
3203 static const struct pmu perf_ops_generic = {
3204         .enable         = perf_swcounter_enable,
3205         .disable        = perf_swcounter_disable,
3206         .read           = perf_swcounter_read,
3207 };
3208
3209 /*
3210  * Software counter: cpu wall time clock
3211  */
3212
3213 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3214 {
3215         int cpu = raw_smp_processor_id();
3216         s64 prev;
3217         u64 now;
3218
3219         now = cpu_clock(cpu);
3220         prev = atomic64_read(&counter->hw.prev_count);
3221         atomic64_set(&counter->hw.prev_count, now);
3222         atomic64_add(now - prev, &counter->count);
3223 }
3224
3225 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3226 {
3227         struct hw_perf_counter *hwc = &counter->hw;
3228         int cpu = raw_smp_processor_id();
3229
3230         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3231         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3232         hwc->hrtimer.function = perf_swcounter_hrtimer;
3233         if (hwc->sample_period) {
3234                 u64 period = max_t(u64, 10000, hwc->sample_period);
3235                 __hrtimer_start_range_ns(&hwc->hrtimer,
3236                                 ns_to_ktime(period), 0,
3237                                 HRTIMER_MODE_REL, 0);
3238         }
3239
3240         return 0;
3241 }
3242
3243 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3244 {
3245         if (counter->hw.sample_period)
3246                 hrtimer_cancel(&counter->hw.hrtimer);
3247         cpu_clock_perf_counter_update(counter);
3248 }
3249
3250 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3251 {
3252         cpu_clock_perf_counter_update(counter);
3253 }
3254
3255 static const struct pmu perf_ops_cpu_clock = {
3256         .enable         = cpu_clock_perf_counter_enable,
3257         .disable        = cpu_clock_perf_counter_disable,
3258         .read           = cpu_clock_perf_counter_read,
3259 };
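
/*
 * Note on the cpu-clock counter above: ->hw.prev_count holds the last
 * cpu_clock() reading and each update folds the delta into ->count, so the
 * counter reads as elapsed wall-clock nanoseconds on that CPU.  A requested
 * sample period is clamped to at least 10 usecs, e.g. attr.sample_period =
 * 4000 effectively runs the hrtimer with a 10000 ns period.
 */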
3260
3261 /*
3262  * Software counter: task time clock
3263  */
3264
3265 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3266 {
3267         u64 prev;
3268         s64 delta;
3269
3270         prev = atomic64_xchg(&counter->hw.prev_count, now);
3271         delta = now - prev;
3272         atomic64_add(delta, &counter->count);
3273 }
3274
3275 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3276 {
3277         struct hw_perf_counter *hwc = &counter->hw;
3278         u64 now;
3279
3280         now = counter->ctx->time;
3281
3282         atomic64_set(&hwc->prev_count, now);
3283         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3284         hwc->hrtimer.function = perf_swcounter_hrtimer;
3285         if (hwc->sample_period) {
3286                 u64 period = max_t(u64, 10000, hwc->sample_period);
3287                 __hrtimer_start_range_ns(&hwc->hrtimer,
3288                                 ns_to_ktime(period), 0,
3289                                 HRTIMER_MODE_REL, 0);
3290         }
3291
3292         return 0;
3293 }
3294
3295 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3296 {
3297         if (counter->hw.sample_period)
3298                 hrtimer_cancel(&counter->hw.hrtimer);
3299         task_clock_perf_counter_update(counter, counter->ctx->time);
3301 }
3302
3303 static void task_clock_perf_counter_read(struct perf_counter *counter)
3304 {
3305         u64 time;
3306
3307         if (!in_nmi()) {
3308                 update_context_time(counter->ctx);
3309                 time = counter->ctx->time;
3310         } else {
3311                 u64 now = perf_clock();
3312                 u64 delta = now - counter->ctx->timestamp;
3313                 time = counter->ctx->time + delta;
3314         }
3315
3316         task_clock_perf_counter_update(counter, time);
3317 }
3318
3319 static const struct pmu perf_ops_task_clock = {
3320         .enable         = task_clock_perf_counter_enable,
3321         .disable        = task_clock_perf_counter_disable,
3322         .read           = task_clock_perf_counter_read,
3323 };
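
/*
 * Note on the task-clock read path above: from NMI context the code avoids
 * update_context_time() and instead estimates the task clock as ctx->time
 * plus the raw perf_clock() delta since the last ctx->timestamp update -
 * presumably because the NMI may have interrupted an update of those very
 * fields on this CPU.
 */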
3324
3325 /*
3326  * Software counter: cpu migrations
3327  */
3328 void perf_counter_task_migration(struct task_struct *task, int cpu)
3329 {
3330         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3331         struct perf_counter_context *ctx;
3332
3333         perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
3334                                  PERF_COUNT_CPU_MIGRATIONS,
3335                                  1, 1, NULL, 0);
3336
3337         ctx = perf_pin_task_context(task);
3338         if (ctx) {
3339                 perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
3340                                          PERF_COUNT_CPU_MIGRATIONS,
3341                                          1, 1, NULL, 0);
3342                 perf_unpin_context(ctx);
3343         }
3344 }
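
/*
 * Sketch of the expected caller (an assumption - the call site is outside
 * this file): the scheduler would invoke the hook above when it moves a
 * task to another CPU, e.g.:
 *
 *	perf_counter_task_migration(p, new_cpu);
 *
 * which counts the migration once in the target CPU's context and once in
 * the task's own context, if it has one.
 */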
3345
3346 #ifdef CONFIG_EVENT_PROFILE
3347 void perf_tpcounter_event(int event_id)
3348 {
3349         struct pt_regs *regs = get_irq_regs();
3350
3351         if (!regs)
3352                 regs = task_pt_regs(current);
3353
3354         __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
3355 }
3356 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3357
3358 extern int ftrace_profile_enable(int);
3359 extern void ftrace_profile_disable(int);
3360
3361 static void tp_perf_counter_destroy(struct perf_counter *counter)
3362 {
3363         ftrace_profile_disable(perf_event_id(&counter->attr));
3364 }
3365
3366 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3367 {
3368         int event_id = perf_event_id(&counter->attr);
3369         int ret;
3370
3371         ret = ftrace_profile_enable(event_id);
3372         if (ret)
3373                 return NULL;
3374
3375         counter->destroy = tp_perf_counter_destroy;
3376         counter->hw.sample_period = counter->attr.sample_period;
3377
3378         return &perf_ops_generic;
3379 }
3380 #else
3381 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3382 {
3383         return NULL;
3384 }
3385 #endif
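
/*
 * Illustrative flow for the tracepoint case (hedged - the ftrace side is
 * not part of this file): ftrace_profile_enable(event_id) is expected to
 * arrange for the tracepoint probe to call perf_tpcounter_event(event_id)
 * on every hit, which feeds __perf_swcounter_event() with
 * PERF_TYPE_TRACEPOINT; ftrace_profile_disable() undoes that when the
 * counter is destroyed.
 */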
3386
3387 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3388 {
3389         const struct pmu *pmu = NULL;
3390
3391         /*
3392          * Software counters (currently) can't in general distinguish
3393          * between user, kernel and hypervisor events.
3394          * However, context switches and cpu migrations are considered
3395          * to be kernel events, and page faults are never hypervisor
3396          * events.
3397          */
3398         switch (perf_event_id(&counter->attr)) {
3399         case PERF_COUNT_CPU_CLOCK:
3400                 pmu = &perf_ops_cpu_clock;
3401
3402                 break;
3403         case PERF_COUNT_TASK_CLOCK:
3404                 /*
3405                  * If the user instantiates this as a per-cpu counter,
3406                  * use the cpu_clock counter instead.
3407                  */
3408                 if (counter->ctx->task)
3409                         pmu = &perf_ops_task_clock;
3410                 else
3411                         pmu = &perf_ops_cpu_clock;
3412
3413                 break;
3414         case PERF_COUNT_PAGE_FAULTS:
3415         case PERF_COUNT_PAGE_FAULTS_MIN:
3416         case PERF_COUNT_PAGE_FAULTS_MAJ:
3417         case PERF_COUNT_CONTEXT_SWITCHES:
3418         case PERF_COUNT_CPU_MIGRATIONS:
3419                 pmu = &perf_ops_generic;
3420                 break;
3421         }
3422
3423         return pmu;
3424 }
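
/*
 * Worked example of the task-clock fallback above: a counter opened with
 * PERF_COUNT_TASK_CLOCK for a specific task gets perf_ops_task_clock,
 * while the same config opened per-cpu (ctx->task == NULL) silently uses
 * perf_ops_cpu_clock instead.
 */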
3425
3426 /*
3427  * Allocate and initialize a counter structure
3428  */
3429 static struct perf_counter *
3430 perf_counter_alloc(struct perf_counter_attr *attr,
3431                    int cpu,
3432                    struct perf_counter_context *ctx,
3433                    struct perf_counter *group_leader,
3434                    gfp_t gfpflags)
3435 {
3436         const struct pmu *pmu;
3437         struct perf_counter *counter;
3438         struct hw_perf_counter *hwc;
3439         long err;
3440
3441         counter = kzalloc(sizeof(*counter), gfpflags);
3442         if (!counter)
3443                 return ERR_PTR(-ENOMEM);
3444
3445         /*
3446          * Single counters are their own group leaders, with an
3447          * empty sibling list:
3448          */
3449         if (!group_leader)
3450                 group_leader = counter;
3451
3452         mutex_init(&counter->child_mutex);
3453         INIT_LIST_HEAD(&counter->child_list);
3454
3455         INIT_LIST_HEAD(&counter->list_entry);
3456         INIT_LIST_HEAD(&counter->event_entry);
3457         INIT_LIST_HEAD(&counter->sibling_list);
3458         init_waitqueue_head(&counter->waitq);
3459
3460         mutex_init(&counter->mmap_mutex);
3461
3462         counter->cpu            = cpu;
3463         counter->attr           = *attr;
3464         counter->group_leader   = group_leader;
3465         counter->pmu            = NULL;
3466         counter->ctx            = ctx;
3467         counter->oncpu          = -1;
3468
3469         counter->ns             = get_pid_ns(current->nsproxy->pid_ns);
3470         counter->id             = atomic64_inc_return(&perf_counter_id);
3471
3472         counter->state          = PERF_COUNTER_STATE_INACTIVE;
3473
3474         if (attr->disabled)
3475                 counter->state = PERF_COUNTER_STATE_OFF;
3476
3477         pmu = NULL;
3478
3479         hwc = &counter->hw;
3480         if (attr->freq && attr->sample_freq)
3481                 hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
3482         else
3483                 hwc->sample_period = attr->sample_period;
3484
3485         /*
3486          * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3487          */
3488         if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3489                 goto done;
3490
3491         if (perf_event_raw(attr)) {
3492                 pmu = hw_perf_counter_init(counter);
3493                 goto done;
3494         }
3495
3496         switch (perf_event_type(attr)) {
3497         case PERF_TYPE_HARDWARE:
3498                 pmu = hw_perf_counter_init(counter);
3499                 break;
3500
3501         case PERF_TYPE_SOFTWARE:
3502                 pmu = sw_perf_counter_init(counter);
3503                 break;
3504
3505         case PERF_TYPE_TRACEPOINT:
3506                 pmu = tp_perf_counter_init(counter);
3507                 break;
3508         }
3509 done:
3510         err = 0;
3511         if (!pmu)
3512                 err = -EINVAL;
3513         else if (IS_ERR(pmu))
3514                 err = PTR_ERR(pmu);
3515
3516         if (err) {
3517                 if (counter->ns)
3518                         put_pid_ns(counter->ns);
3519                 kfree(counter);
3520                 return ERR_PTR(err);
3521         }
3522
3523         counter->pmu = pmu;
3524
3525         atomic_inc(&nr_counters);
3526         if (counter->attr.mmap)
3527                 atomic_inc(&nr_mmap_counters);
3528         if (counter->attr.munmap)
3529                 atomic_inc(&nr_munmap_counters);
3530         if (counter->attr.comm)
3531                 atomic_inc(&nr_comm_counters);
3532
3533         return counter;
3534 }
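
/*
 * Callers check the result with the usual ERR_PTR() convention; a minimal
 * sketch mirroring the syscall path below:
 *
 *	counter = perf_counter_alloc(&attr, cpu, ctx, NULL, GFP_KERNEL);
 *	if (IS_ERR(counter))
 *		return PTR_ERR(counter);
 */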
3535
3536 /**
3537  * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
3538  *
3539  * @attr_uptr:	event type attributes for monitoring/sampling
3540  * @pid:		target pid
3541  * @cpu:		target cpu
3542  * @group_fd:		group leader counter fd
 * @flags:		reserved for future use, must be 0
3543  */
3544 SYSCALL_DEFINE5(perf_counter_open,
3545                 const struct perf_counter_attr __user *, attr_uptr,
3546                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3547 {
3548         struct perf_counter *counter, *group_leader;
3549         struct perf_counter_attr attr;
3550         struct perf_counter_context *ctx;
3551         struct file *counter_file = NULL;
3552         struct file *group_file = NULL;
3553         int fput_needed = 0;
3554         int fput_needed2 = 0;
3555         int ret;
3556
3557         /* for future expandability... */
3558         if (flags)
3559                 return -EINVAL;
3560
3561         if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
3562                 return -EFAULT;
3563
3564         /*
3565          * Get the target context (task or percpu):
3566          */
3567         ctx = find_get_context(pid, cpu);
3568         if (IS_ERR(ctx))
3569                 return PTR_ERR(ctx);
3570
3571         /*
3572          * Look up the group leader (we will attach this counter to it):
3573          */
3574         group_leader = NULL;
3575         if (group_fd != -1) {
3576                 ret = -EINVAL;
3577                 group_file = fget_light(group_fd, &fput_needed);
3578                 if (!group_file)
3579                         goto err_put_context;
3580                 if (group_file->f_op != &perf_fops)
3581                         goto err_put_context;
3582
3583                 group_leader = group_file->private_data;
3584                 /*
3585                  * Do not allow a recursive hierarchy (this new sibling
3586                  * becoming part of another group-sibling):
3587                  */
3588                 if (group_leader->group_leader != group_leader)
3589                         goto err_put_context;
3590                 /*
3591                  * Do not allow to attach to a group in a different
3592                  * task or CPU context:
3593                  */
3594                 if (group_leader->ctx != ctx)
3595                         goto err_put_context;
3596                 /*
3597                  * Only a group leader can be exclusive or pinned
3598                  */
3599                 if (attr.exclusive || attr.pinned)
3600                         goto err_put_context;
3601         }
3602
3603         counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
3604                                      GFP_KERNEL);
3605         ret = PTR_ERR(counter);
3606         if (IS_ERR(counter))
3607                 goto err_put_context;
3608
3609         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3610         if (ret < 0)
3611                 goto err_free_put_context;
3612
3613         counter_file = fget_light(ret, &fput_needed2);
3614         if (!counter_file)
3615                 goto err_free_put_context;
3616
3617         counter->filp = counter_file;
3618         WARN_ON_ONCE(ctx->parent_ctx);
3619         mutex_lock(&ctx->mutex);
3620         perf_install_in_context(ctx, counter, cpu);
3621         ++ctx->generation;
3622         mutex_unlock(&ctx->mutex);
3623
3624         counter->owner = current;
3625         get_task_struct(current);
3626         mutex_lock(&current->perf_counter_mutex);
3627         list_add_tail(&counter->owner_entry, &current->perf_counter_list);
3628         mutex_unlock(&current->perf_counter_mutex);
3629
3630         fput_light(counter_file, fput_needed2);
3631
3632 out_fput:
3633         fput_light(group_file, fput_needed);
3634
3635         return ret;
3636
3637 err_free_put_context:
3638         kfree(counter);
3639
3640 err_put_context:
3641         put_ctx(ctx);
3642
3643         goto out_fput;
3644 }
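
/*
 * Illustrative user-space usage (a sketch, not part of the kernel; the
 * syscall number macro and the exact attr encoding are assumptions taken
 * from this era's <linux/perf_counter.h>):
 *
 *	struct perf_counter_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.config = ...;	(select an event as encoded by the header)
 *	fd = syscall(__NR_perf_counter_open, &attr, getpid(), -1, -1, 0);
 *
 * The returned fd can be read() for the counter value, mmap()ed for the
 * sampling buffer, or passed as group_fd when opening sibling counters.
 */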
3645
3646 /*
3647  * inherit a counter from parent task to child task:
3648  */
3649 static struct perf_counter *
3650 inherit_counter(struct perf_counter *parent_counter,
3651               struct task_struct *parent,
3652               struct perf_counter_context *parent_ctx,
3653               struct task_struct *child,
3654               struct perf_counter *group_leader,
3655               struct perf_counter_context *child_ctx)
3656 {
3657         struct perf_counter *child_counter;
3658
3659         /*
3660          * Instead of creating recursive hierarchies of counters,
3661          * we link inherited counters back to the original parent,
3662          * which is guaranteed to have a filp that we use as the
3663          * reference count:
3664          */
3665         if (parent_counter->parent)
3666                 parent_counter = parent_counter->parent;
3667
3668         child_counter = perf_counter_alloc(&parent_counter->attr,
3669                                            parent_counter->cpu, child_ctx,
3670                                            group_leader, GFP_KERNEL);
3671         if (IS_ERR(child_counter))
3672                 return child_counter;
3673         get_ctx(child_ctx);
3674
3675         /*
3676          * Make the child state follow the state of the parent counter,
3677          * not its attr.disabled bit.  We hold the parent's mutex,
3678          * so we won't race with perf_counter_{en, dis}able_family.
3679          */
3680         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3681                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3682         else
3683                 child_counter->state = PERF_COUNTER_STATE_OFF;
3684
3685         /*
3686          * Link it up in the child's context:
3687          */
3688         add_counter_to_ctx(child_counter, child_ctx);
3689
3690         child_counter->parent = parent_counter;
3691         /*
3692          * inherit into child's child as well:
3693          */
3694         child_counter->attr.inherit = 1;
3695
3696         /*
3697          * Get a reference to the parent filp - we will fput it
3698          * when the child counter exits. This is safe to do because
3699          * we are in the parent and we know that the filp still
3700          * exists and has a nonzero count:
3701          */
3702         atomic_long_inc(&parent_counter->filp->f_count);
3703
3704         /*
3705          * Link this into the parent counter's child list
3706          */
3707         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3708         mutex_lock(&parent_counter->child_mutex);
3709         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3710         mutex_unlock(&parent_counter->child_mutex);
3711
3712         return child_counter;
3713 }
3714
3715 static int inherit_group(struct perf_counter *parent_counter,
3716               struct task_struct *parent,
3717               struct perf_counter_context *parent_ctx,
3718               struct task_struct *child,
3719               struct perf_counter_context *child_ctx)
3720 {
3721         struct perf_counter *leader;
3722         struct perf_counter *sub;
3723         struct perf_counter *child_ctr;
3724
3725         leader = inherit_counter(parent_counter, parent, parent_ctx,
3726                                  child, NULL, child_ctx);
3727         if (IS_ERR(leader))
3728                 return PTR_ERR(leader);
3729         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3730                 child_ctr = inherit_counter(sub, parent, parent_ctx,
3731                                             child, leader, child_ctx);
3732                 if (IS_ERR(child_ctr))
3733                         return PTR_ERR(child_ctr);
3734         }
3735         return 0;
3736 }
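
/*
 * Note on the group inheritance above: the leader is inherited first with a
 * NULL group_leader, so it becomes its own leader in the child context;
 * each sibling is then inherited with that new leader, preserving the
 * parent's group structure in the child.
 */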
3737
3738 static void sync_child_counter(struct perf_counter *child_counter,
3739                                struct perf_counter *parent_counter)
3740 {
3741         u64 child_val;
3742
3743         child_val = atomic64_read(&child_counter->count);
3744
3745         /*
3746          * Add back the child's count to the parent's count:
3747          */
3748         atomic64_add(child_val, &parent_counter->count);
3749         atomic64_add(child_counter->total_time_enabled,
3750                      &parent_counter->child_total_time_enabled);
3751         atomic64_add(child_counter->total_time_running,
3752                      &parent_counter->child_total_time_running);
3753
3754         /*
3755          * Remove this counter from the parent's list
3756          */
3757         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3758         mutex_lock(&parent_counter->child_mutex);
3759         list_del_init(&child_counter->child_list);
3760         mutex_unlock(&parent_counter->child_mutex);
3761
3762         /*
3763          * Release the parent counter, if this was the last
3764          * reference to it.
3765          */
3766         fput(parent_counter->filp);
3767 }
3768
3769 static void
3770 __perf_counter_exit_task(struct perf_counter *child_counter,
3771                          struct perf_counter_context *child_ctx)
3772 {
3773         struct perf_counter *parent_counter;
3774
3775         update_counter_times(child_counter);
3776         perf_counter_remove_from_context(child_counter);
3777
3778         parent_counter = child_counter->parent;
3779         /*
3780          * It can happen that parent exits first, and has counters
3781          * that are still around due to the child reference. These
3782          * counters need to be zapped - but otherwise linger.
3783          */
3784         if (parent_counter) {
3785                 sync_child_counter(child_counter, parent_counter);
3786                 free_counter(child_counter);
3787         }
3788 }
3789
3790 /*
3791  * When a child task exits, feed back counter values to parent counters.
3792  */
3793 void perf_counter_exit_task(struct task_struct *child)
3794 {
3795         struct perf_counter *child_counter, *tmp;
3796         struct perf_counter_context *child_ctx;
3797         unsigned long flags;
3798
3799         if (likely(!child->perf_counter_ctxp))
3800                 return;
3801
3802         local_irq_save(flags);
3803         /*
3804          * We can't reschedule here because interrupts are disabled,
3805          * and either the child is current or it is a task that can't be
3806          * scheduled, so we are now safe from a reschedule changing
3807          * our context.
3808          */
3809         child_ctx = child->perf_counter_ctxp;
3810         __perf_counter_task_sched_out(child_ctx);
3811
3812         /*
3813          * Take the context lock here so that if find_get_context is
3814          * reading child->perf_counter_ctxp, we wait until it has
3815          * incremented the context's refcount before we do put_ctx below.
3816          */
3817         spin_lock(&child_ctx->lock);
3818         child->perf_counter_ctxp = NULL;
3819         if (child_ctx->parent_ctx) {
3820                 /*
3821                  * This context is a clone; unclone it so it can't get
3822                  * swapped to another process while we're removing all
3823                  * the counters from it.
3824                  */
3825                 put_ctx(child_ctx->parent_ctx);
3826                 child_ctx->parent_ctx = NULL;
3827         }
3828         spin_unlock(&child_ctx->lock);
3829         local_irq_restore(flags);
3830
3831         mutex_lock(&child_ctx->mutex);
3832
3833 again:
3834         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3835                                  list_entry)
3836                 __perf_counter_exit_task(child_counter, child_ctx);
3837
3838         /*
3839          * If the last counter was a group counter, it will have appended all
3840          * its siblings to the list, but we obtained 'tmp' before that, so the
3841          * loop ends before visiting them and we go around again for the siblings.
3842          */
3843         if (!list_empty(&child_ctx->counter_list))
3844                 goto again;
3845
3846         mutex_unlock(&child_ctx->mutex);
3847
3848         put_ctx(child_ctx);
3849 }
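
/*
 * Call-site note (an assumption - the caller is outside this file):
 * perf_counter_exit_task() is expected to run from the task exit path,
 * e.g. do_exit(), once the task can no longer be scheduled.
 */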
3850
3851 /*
3852  * Free an unexposed, unused context as created by inheritance in
3853  * perf_counter_init_task() below; used by fork() in case of failure.
3854  */
3855 void perf_counter_free_task(struct task_struct *task)
3856 {
3857         struct perf_counter_context *ctx = task->perf_counter_ctxp;
3858         struct perf_counter *counter, *tmp;
3859
3860         if (!ctx)
3861                 return;
3862
3863         mutex_lock(&ctx->mutex);
3864 again:
3865         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
3866                 struct perf_counter *parent = counter->parent;
3867
3868                 if (WARN_ON_ONCE(!parent))
3869                         continue;
3870
3871                 mutex_lock(&parent->child_mutex);
3872                 list_del_init(&counter->child_list);
3873                 mutex_unlock(&parent->child_mutex);
3874
3875                 fput(parent->filp);
3876
3877                 list_del_counter(counter, ctx);
3878                 free_counter(counter);
3879         }
3880
3881         if (!list_empty(&ctx->counter_list))
3882                 goto again;
3883
3884         mutex_unlock(&ctx->mutex);
3885
3886         put_ctx(ctx);
3887 }
3888
3889 /*
3890  * Initialize the perf_counter context in task_struct
3891  */
3892 int perf_counter_init_task(struct task_struct *child)
3893 {
3894         struct perf_counter_context *child_ctx, *parent_ctx;
3895         struct perf_counter_context *cloned_ctx;
3896         struct perf_counter *counter;
3897         struct task_struct *parent = current;
3898         int inherited_all = 1;
3899         int ret = 0;
3900
3901         child->perf_counter_ctxp = NULL;
3902
3903         mutex_init(&child->perf_counter_mutex);
3904         INIT_LIST_HEAD(&child->perf_counter_list);
3905
3906         if (likely(!parent->perf_counter_ctxp))
3907                 return 0;
3908
3909         /*
3910          * This is executed from the parent task context, so inherit
3911          * counters that have been marked for cloning.
3912          * First allocate and initialize a context for the child.
3913          */
3914
3915         child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
3916         if (!child_ctx)
3917                 return -ENOMEM;
3918
3919         __perf_counter_init_context(child_ctx, child);
3920         child->perf_counter_ctxp = child_ctx;
3921         get_task_struct(child);
3922
3923         /*
3924          * If the parent's context is a clone, pin it so it won't get
3925          * swapped under us.
3926          */
3927         parent_ctx = perf_pin_task_context(parent);
3928
3929         /*
3930          * No need to check if parent_ctx != NULL here; since we saw
3931          * it non-NULL earlier, the only reason for it to become NULL
3932          * is if we exit, and since we're currently in the middle of
3933          * a fork we can't be exiting at the same time.
3934          */
3935
3936         /*
3937          * Lock the parent list. No need to lock the child - not PID
3938          * hashed yet and not running, so nobody can access it.
3939          */
3940         mutex_lock(&parent_ctx->mutex);
3941
3942         /*
3943          * We don't have to disable NMIs - we are only looking at
3944          * the list, not manipulating it:
3945          */
3946         list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
3947                 if (counter != counter->group_leader)
3948                         continue;
3949
3950                 if (!counter->attr.inherit) {
3951                         inherited_all = 0;
3952                         continue;
3953                 }
3954
3955                 ret = inherit_group(counter, parent, parent_ctx,
3956                                              child, child_ctx);
3957                 if (ret) {
3958                         inherited_all = 0;
3959                         break;
3960                 }
3961         }
3962
3963         if (inherited_all) {
3964                 /*
3965                  * Mark the child context as a clone of the parent
3966                  * context, or of whatever the parent is a clone of.
3967                  * Note that if the parent is a clone, it could get
3968                  * uncloned at any point, but that doesn't matter
3969                  * because the list of counters and the generation
3970                  * count can't have changed since we took the mutex.
3971                  */
3972                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
3973                 if (cloned_ctx) {
3974                         child_ctx->parent_ctx = cloned_ctx;
3975                         child_ctx->parent_gen = parent_ctx->parent_gen;
3976                 } else {
3977                         child_ctx->parent_ctx = parent_ctx;
3978                         child_ctx->parent_gen = parent_ctx->generation;
3979                 }
3980                 get_ctx(child_ctx->parent_ctx);
3981         }
3982
3983         mutex_unlock(&parent_ctx->mutex);
3984
3985         perf_unpin_context(parent_ctx);
3986
3987         return ret;
3988 }
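
/*
 * Call-site note (an assumption - the callers are outside this file):
 * perf_counter_init_task() is expected to run from copy_process() during
 * fork; if fork subsequently fails, perf_counter_free_task() above tears
 * the half-built child context down again.
 */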
3989
3990 static void __cpuinit perf_counter_init_cpu(int cpu)
3991 {
3992         struct perf_cpu_context *cpuctx;
3993
3994         cpuctx = &per_cpu(perf_cpu_context, cpu);
3995         __perf_counter_init_context(&cpuctx->ctx, NULL);
3996
3997         spin_lock(&perf_resource_lock);
3998         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3999         spin_unlock(&perf_resource_lock);
4000
4001         hw_perf_counter_setup(cpu);
4002 }
4003
4004 #ifdef CONFIG_HOTPLUG_CPU
4005 static void __perf_counter_exit_cpu(void *info)
4006 {
4007         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4008         struct perf_counter_context *ctx = &cpuctx->ctx;
4009         struct perf_counter *counter, *tmp;
4010
4011         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4012                 __perf_counter_remove_from_context(counter);
4013 }
4014 static void perf_counter_exit_cpu(int cpu)
4015 {
4016         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4017         struct perf_counter_context *ctx = &cpuctx->ctx;
4018
4019         mutex_lock(&ctx->mutex);
4020         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4021         mutex_unlock(&ctx->mutex);
4022 }
4023 #else
4024 static inline void perf_counter_exit_cpu(int cpu) { }
4025 #endif
4026
4027 static int __cpuinit
4028 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4029 {
4030         unsigned int cpu = (long)hcpu;
4031
4032         switch (action) {
4033
4034         case CPU_UP_PREPARE:
4035         case CPU_UP_PREPARE_FROZEN:
4036                 perf_counter_init_cpu(cpu);
4037                 break;
4038
4039         case CPU_DOWN_PREPARE:
4040         case CPU_DOWN_PREPARE_FROZEN:
4041                 perf_counter_exit_cpu(cpu);
4042                 break;
4043
4044         default:
4045                 break;
4046         }
4047
4048         return NOTIFY_OK;
4049 }
4050
4051 /*
4052  * This has to have a higher priority than migration_notifier in sched.c.
4053  */
4054 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4055         .notifier_call          = perf_cpu_notify,
4056         .priority               = 20,
4057 };
4058
4059 void __init perf_counter_init(void)
4060 {
4061         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4062                         (void *)(long)smp_processor_id());
4063         register_cpu_notifier(&perf_cpu_nb);
4064 }
4065
4066 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4067 {
4068         return sprintf(buf, "%d\n", perf_reserved_percpu);
4069 }
4070
4071 static ssize_t
4072 perf_set_reserve_percpu(struct sysdev_class *class,
4073                         const char *buf,
4074                         size_t count)
4075 {
4076         struct perf_cpu_context *cpuctx;
4077         unsigned long val;
4078         int err, cpu, mpt;
4079
4080         err = strict_strtoul(buf, 10, &val);
4081         if (err)
4082                 return err;
4083         if (val > perf_max_counters)
4084                 return -EINVAL;
4085
4086         spin_lock(&perf_resource_lock);
4087         perf_reserved_percpu = val;
4088         for_each_online_cpu(cpu) {
4089                 cpuctx = &per_cpu(perf_cpu_context, cpu);
4090                 spin_lock_irq(&cpuctx->ctx.lock);
4091                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4092                           perf_max_counters - perf_reserved_percpu);
4093                 cpuctx->max_pertask = mpt;
4094                 spin_unlock_irq(&cpuctx->ctx.lock);
4095         }
4096         spin_unlock(&perf_resource_lock);
4097
4098         return count;
4099 }
4100
4101 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4102 {
4103         return sprintf(buf, "%d\n", perf_overcommit);
4104 }
4105
4106 static ssize_t
4107 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4108 {
4109         unsigned long val;
4110         int err;
4111
4112         err = strict_strtoul(buf, 10, &val);
4113         if (err)
4114                 return err;
4115         if (val > 1)
4116                 return -EINVAL;
4117
4118         spin_lock(&perf_resource_lock);
4119         perf_overcommit = val;
4120         spin_unlock(&perf_resource_lock);
4121
4122         return count;
4123 }
4124
4125 static SYSDEV_CLASS_ATTR(
4126                                 reserve_percpu,
4127                                 0644,
4128                                 perf_show_reserve_percpu,
4129                                 perf_set_reserve_percpu
4130                         );
4131
4132 static SYSDEV_CLASS_ATTR(
4133                                 overcommit,
4134                                 0644,
4135                                 perf_show_overcommit,
4136                                 perf_set_overcommit
4137                         );
4138
4139 static struct attribute *perfclass_attrs[] = {
4140         &attr_reserve_percpu.attr,
4141         &attr_overcommit.attr,
4142         NULL
4143 };
4144
4145 static struct attribute_group perfclass_attr_group = {
4146         .attrs                  = perfclass_attrs,
4147         .name                   = "perf_counters",
4148 };
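
/*
 * With cpu_sysdev_class as the parent, these attributes are expected to
 * appear as /sys/devices/system/cpu/perf_counters/{reserve_percpu,
 * overcommit} (the path is an assumption based on the sysdev class), e.g.:
 *
 *	# echo 1 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	# echo 0 > /sys/devices/system/cpu/perf_counters/overcommit
 */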
4149
4150 static int __init perf_counter_sysfs_init(void)
4151 {
4152         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4153                                   &perfclass_attr_group);
4154 }
4155 device_initcall(perf_counter_sysfs_init);