// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x - CPU-measurement Counter Sets
 *
 * Copyright IBM Corp. 2019, 2021
 * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
 *	      Thomas Richter <tmricht@linux.ibm.com>
 */

#define KMSG_COMPONENT	"cpum_cf_diag"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/processor.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>

#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mcf.h>
#include <asm/timex.h>
#include <asm/debug.h>

#include <asm/hwctrset.h>
#include <asm/perf_cpum_cf_diag.h>

#define	CF_DIAG_CTRSET_DEF	0xfeef	/* Counter set header mark */
#define CF_DIAG_MIN_INTERVAL	60	/* Minimum counter set read */
					/* interval in seconds */
static unsigned long cf_diag_interval = CF_DIAG_MIN_INTERVAL;
static unsigned int cf_diag_cpu_speed;
static debug_info_t *cf_diag_dbg;

struct cf_diag_csd {			/* Counter set data per CPU */
	size_t used;			/* Bytes used in data/start */
	unsigned char start[PAGE_SIZE];	/* Counter set at event start */
	unsigned char data[PAGE_SIZE];	/* Counter set at event delete */
	unsigned int sets;		/* # Counter set saved in data */
};
static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);

/* Counter sets are stored as data stream in a page sized memory buffer and
 * exported to user space via raw data attached to the event sample data.
 * Each counter set starts with an eight byte header consisting of:
 * - a two byte eye catcher (0xfeef)
 * - a one byte counter set number
 * - a two byte counter set size (indicates the number of counters in this set)
 * - a three byte reserved value (must be zero) to make the header the same
 *   size as a counter value.
 * All counter values are eight bytes in size.
 *
 * All counter sets are followed by a 64 byte trailer.
 * The trailer consists of a:
 * - flag field indicating valid fields when the corresponding bit is set
 * - the counter facility first and second version number
 * - the CPU speed if nonzero
 * - the time stamp the counter sets have been collected
 * - the time of day (TOD) base value
 * - the machine type
 *
 * The counter sets are saved when the process is prepared to be executed on a
 * CPU and saved again when the process is going to be removed from a CPU.
 * The difference of both counter sets is calculated and stored in the event
 * sample data area.
 */

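/* A minimal user-space sketch (not compiled here) of walking this data
 * stream; it follows the layout of struct cf_ctrset_entry below. The
 * function name and the fixed-width types are illustrative assumptions,
 * and <stdint.h>/<string.h> are required:
 *
 *	void walk_ctrsets(const unsigned char *buf, size_t len)
 *	{
 *		size_t offset = 0;
 *
 *		while (offset + 8 <= len) {
 *			uint16_t def, set, ctr;
 *
 *			memcpy(&def, buf + offset, sizeof(def));
 *			if (def != 0xfeef)	// trailer reached
 *				break;
 *			memcpy(&set, buf + offset + 2, sizeof(set));
 *			memcpy(&ctr, buf + offset + 4, sizeof(ctr));
 *			// ctr eight-byte counter values follow the header
 *			offset += 8 + (size_t)ctr * sizeof(uint64_t);
 *		}
 *		// the final 64 bytes at buf + offset hold the trailer
 *	}
 */
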
struct cf_ctrset_entry {	/* CPU-M CF counter set entry (8 byte) */
	unsigned int def:16;	/* 0-15  Data Entry Format */
	unsigned int set:16;	/* 16-31 Counter set identifier */
	unsigned int ctr:16;	/* 32-47 Number of stored counters */
	unsigned int res1:16;	/* 48-63 Reserved */
};

struct cf_trailer_entry {	/* CPU-M CF_DIAG trailer (64 byte) */
	/* 0 - 7 */
	union {
		struct {
			unsigned int clock_base:1;	/* TOD clock base set */
			unsigned int speed:1;		/* CPU speed set */
			/* Measurement alerts */
			unsigned int mtda:1;	/* Loss of MT ctr. data alert */
			unsigned int caca:1;	/* Counter auth. change alert */
			unsigned int lcda:1;	/* Loss of counter data alert */
		};
		unsigned long flags;	/* 0-63 All indicators */
	};
	/* 8 - 15 */
	unsigned int cfvn:16;		/* 64-79 Ctr First Version */
	unsigned int csvn:16;		/* 80-95 Ctr Second Version */
	unsigned int cpu_speed:32;	/* 96-127 CPU speed */
	/* 16 - 23 */
	unsigned long timestamp;	/* 128-191 Timestamp (TOD) */
	/* 24 - 55 */
	union {
		struct {
			unsigned long progusage1;
			unsigned long progusage2;
			unsigned long progusage3;
			unsigned long tod_base;
		};
		unsigned long progusage[4];
	};
	/* 56 - 63 */
	unsigned int mach_type:16;	/* Machine type */
	unsigned int res1:16;		/* Reserved */
	unsigned int res2:32;		/* Reserved */
};

/* Create the trailer data at the end of a page. */
static void cf_diag_trailer(struct cf_trailer_entry *te)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cpuid cpuid;

	te->cfvn = cpuhw->info.cfvn;		/* Counter version numbers */
	te->csvn = cpuhw->info.csvn;

	get_cpu_id(&cpuid);			/* Machine type */
	te->mach_type = cpuid.machine;
	te->cpu_speed = cf_diag_cpu_speed;
	if (te->cpu_speed)
		te->speed = 1;			/* CPU speed valid */
	te->clock_base = 1;			/* Save clock base */
	te->tod_base = tod_clock_base.tod;
	te->timestamp = get_tod_clock_fast();
}

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cf_diag_enable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s pmu %p cpu %d flags %#x state %#llx\n",
			    __func__, pmu, smp_processor_id(), cpuhw->flags,
			    cpuhw->state);
	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}
	cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * Disable and enable (inactive) the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cf_diag_disable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	u64 inactive;
	int err;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s pmu %p cpu %d flags %#x state %#llx\n",
			    __func__, pmu, smp_processor_id(), cpuhw->flags,
			    cpuhw->state);
	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}
	cpuhw->flags &= ~PMU_F_ENABLED;
}

/* Number of perf events counting hardware events */
static atomic_t cf_diag_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(cf_diag_reserve_mutex);

/* Release the PMU if event is the last perf event */
static void cf_diag_perf_event_destroy(struct perf_event *event)
{
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d cf_diag_events %d\n",
			    __func__, event, smp_processor_id(),
			    atomic_read(&cf_diag_events));
	if (atomic_dec_return(&cf_diag_events) == 0)
		__kernel_cpumcf_end();
}

static int get_authctrsets(void)
{
	struct cpu_cf_events *cpuhw;
	unsigned long auth = 0;
	enum cpumf_ctr_set i;

	cpuhw = &get_cpu_var(cpu_cf_events);
	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
			auth |= cpumf_ctr_ctl[i];
	}
	put_cpu_var(cpu_cf_events);
	return auth;
}

/* Setup the event. Test for authorized counter sets and only include counter
 * sets which are authorized at the time of the setup. Including unauthorized
 * counter sets results in a specification exception (and panic).
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	int err = 0;

	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
			    event, event->cpu);

	event->hw.config = attr->config;

	/* Add all authorized counter sets to config_base. The
	 * hardware init function is either called per-cpu or just once
	 * for all CPUs (event->cpu == -1). This depends on whether
	 * counting is started for all CPUs or on a per-workload basis,
	 * where the perf event moves from one CPU to another CPU.
	 * Checking the authorization on any CPU is fine as the hardware
	 * applies the same authorization settings to all CPUs.
	 */
	event->hw.config_base = get_authctrsets();

	/* No authorized counter sets, nothing to count/sample */
	if (!event->hw.config_base) {
		err = -EINVAL;
		goto out;
	}

	/* Set sample_period to indicate sampling */
	event->hw.sample_period = attr->sample_period;
	local64_set(&event->hw.period_left, event->hw.sample_period);
	event->hw.last_period = event->hw.sample_period;
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n",
			    __func__, err, event->hw.config_base);
	return err;
}

/* Return 0 if the CPU-measurement counter facility is currently free
 * and an error otherwise.
 */
static int cf_diag_perf_event_inuse(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&cf_diag_events)) {
		mutex_lock(&cf_diag_reserve_mutex);
		if (atomic_read(&cf_diag_events) == 0 &&
		    __kernel_cpumcf_begin())
			err = -EBUSY;
		else
			err = atomic_inc_return(&cf_diag_events);
		mutex_unlock(&cf_diag_reserve_mutex);
	}
	return err;
}

static int cf_diag_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	int err = -ENOENT;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d config %#llx type:%u "
			    "sample_type %#llx cf_diag_events %d\n", __func__,
			    event, event->cpu, attr->config, event->pmu->type,
			    attr->sample_type, atomic_read(&cf_diag_events));

	if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
	    event->attr.type != event->pmu->type)
		goto out;

	/* Raw events are used to access counters directly,
	 * hence do not permit excludes.
	 * This event is useless without PERF_SAMPLE_RAW to return counter set
	 * values as raw data.
	 */
	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv ||
	    !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Initialize for using the CPU-measurement counter facility */
	err = cf_diag_perf_event_inuse();
	if (err < 0)
		goto out;
	event->destroy = cf_diag_perf_event_destroy;

	err = __hw_perf_event_init(event);
	if (unlikely(err))
		event->destroy(event);
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
	return err;
}

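/* A user-space sketch (not part of this file) of opening this event via
 * perf_event_open(). The dynamic PMU type and the config value are read
 * from sysfs instead of being hard-coded; the paths below and the choice
 * of sample_period are illustrative assumptions:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	attr.type = ...;   // /sys/bus/event_source/devices/cpum_cf_diag/type
 *	attr.config = ...; // event= value from .../cpum_cf_diag/events/CF_DIAG
 *	attr.sample_type = PERF_SAMPLE_CPU | PERF_SAMPLE_RAW;
 *	attr.sample_period = 1;
 *	// exclude_kernel/exclude_user/exclude_hv must stay zero, see the
 *	// check in cf_diag_event_init() above
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */
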
static void cf_diag_read(struct perf_event *event)
{
	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event);
}

/* Return the maximum possible counter set size (in number of 8 byte counters)
 * depending on type and model number.
 */
static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
				  struct cpumf_ctr_info *info)
{
	size_t ctrset_size = 0;

	switch (ctrset) {
	case CPUMF_CTR_SET_BASIC:
		if (info->cfvn >= 1)
			ctrset_size = 6;
		break;
	case CPUMF_CTR_SET_USER:
		if (info->cfvn == 1)
			ctrset_size = 6;
		else if (info->cfvn >= 3)
			ctrset_size = 2;
		break;
	case CPUMF_CTR_SET_CRYPTO:
		if (info->csvn >= 1 && info->csvn <= 5)
			ctrset_size = 16;
		else if (info->csvn == 6)
			ctrset_size = 20;
		break;
	case CPUMF_CTR_SET_EXT:
		if (info->csvn == 1)
			ctrset_size = 32;
		else if (info->csvn == 2)
			ctrset_size = 48;
		else if (info->csvn >= 3 && info->csvn <= 5)
			ctrset_size = 128;
		else if (info->csvn == 6)
			ctrset_size = 160;
		break;
	case CPUMF_CTR_SET_MT_DIAG:
		if (info->csvn > 3)
			ctrset_size = 48;
		break;
	case CPUMF_CTR_SET_MAX:
		break;
	}

	return ctrset_size;
}

/* Calculate memory needed to store all counter sets together with header and
 * trailer data. This is independent of the counter set authorization which
 * can vary depending on the configuration.
 */
static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info)
{
	size_t max_size = sizeof(struct cf_trailer_entry);
	enum cpumf_ctr_set i;

	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		size_t size = cf_diag_ctrset_size(i, info);

		if (size)
			max_size += size * sizeof(u64) +
				    sizeof(struct cf_ctrset_entry);
	}
	debug_sprintf_event(cf_diag_dbg, 5, "%s max_size %zu\n", __func__,
			    max_size);

	return max_size;
}

/* Read a counter set. The counter set number determines which counter set and
 * the CPUM-CF first and second version number determine the number of
 * available counters in this counter set.
 * Each counter set starts with a header containing the counter set number and
 * the number of 8 byte counters.
 *
 * The function returns the number of bytes occupied by this counter set
 * including the header.
 * If there is no counter in the counter set, this counter set is useless and
 * zero is returned in this case.
 */
static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
				size_t room)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	size_t ctrset_size, need = 0;
	int rc = 3;				/* Assume write failure */

	ctrdata->def = CF_DIAG_CTRSET_DEF;
	ctrdata->set = ctrset;
	ctrdata->res1 = 0;
	ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info);

	if (ctrset_size) {			/* Save data */
		need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
		if (need <= room)
			rc = ctr_stcctm(ctrset, ctrset_size,
					(u64 *)(ctrdata + 1));
		if (rc != 3)
			ctrdata->ctr = ctrset_size;
		else
			need = 0;
	}

	debug_sprintf_event(cf_diag_dbg, 6,
			    "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
			    " need %zd rc %d\n",
			    __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
			    cpuhw->info.csvn, need, rc);
	return need;
}

/* Read out all counter sets and save them in the provided data buffer.
 * The last 64 bytes host an artificial trailer entry.
 */
static size_t cf_diag_getctr(void *data, size_t sz, unsigned long auth)
{
	struct cf_trailer_entry *trailer;
	size_t offset = 0, done;
	int i;

	memset(data, 0, sz);
	sz -= sizeof(*trailer);		/* Always room for trailer */
	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		struct cf_ctrset_entry *ctrdata = data + offset;

		if (!(auth & cpumf_ctr_ctl[i]))
			continue;	/* Counter set not authorized */

		done = cf_diag_getctrset(ctrdata, i, sz - offset);
		offset += done;
		debug_sprintf_event(cf_diag_dbg, 6,
				    "%s ctrset %d offset %zu done %zu\n",
				    __func__, i, offset, done);
	}
	trailer = data + offset;
	cf_diag_trailer(trailer);
	return offset + sizeof(*trailer);
}

/* Calculate the difference for each counter in a counter set. */
static void cf_diag_diffctrset(u64 *pstart, u64 *pstop, int counters)
{
	for (; --counters >= 0; ++pstart, ++pstop) {
		if (*pstop >= *pstart)
			*pstop -= *pstart;
		else
			*pstop = *pstart - *pstop;
	}
}

/* Scan the counter sets and calculate the difference of each counter
 * in each set. The result is the increment of each counter during the
 * period the counter set has been activated.
 *
 * Return true on success.
 */
static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth)
{
	struct cf_trailer_entry *trailer_start, *trailer_stop;
	struct cf_ctrset_entry *ctrstart, *ctrstop;
	size_t offset = 0;

	auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
	do {
		ctrstart = (struct cf_ctrset_entry *)(csd->start + offset);
		ctrstop = (struct cf_ctrset_entry *)(csd->data + offset);

		if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
			pr_err("cpum_cf_diag counter set compare error "
			       "in set %i\n", ctrstart->set);
			return 0;
		}
		auth &= ~cpumf_ctr_ctl[ctrstart->set];
		if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
			cf_diag_diffctrset((u64 *)(ctrstart + 1),
					   (u64 *)(ctrstop + 1), ctrstart->ctr);
			offset += ctrstart->ctr * sizeof(u64) +
				  sizeof(*ctrstart);
		}
		debug_sprintf_event(cf_diag_dbg, 6,
				    "%s set %d ctr %d offset %zu auth %lx\n",
				    __func__, ctrstart->set, ctrstart->ctr,
				    offset, auth);
	} while (ctrstart->def && auth);

	/* Save time_stamp from start of event in stop's trailer */
	trailer_start = (struct cf_trailer_entry *)(csd->start + offset);
	trailer_stop = (struct cf_trailer_entry *)(csd->data + offset);
	trailer_stop->progusage[0] = trailer_start->timestamp;

	return 1;
}

/* Create perf event sample with the counter sets as raw data. The sample
 * is then pushed to the event subsystem and the function checks for
 * possible event overflows. If an event overflow occurs, the PMU is
 * stopped.
 *
 * Return non-zero if an event overflow occurred.
 */
static int cf_diag_push_sample(struct perf_event *event,
			       struct cf_diag_csd *csd)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	perf_sample_data_init(&data, 0, event->hw.last_period);
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));

	if (event->attr.sample_type & PERF_SAMPLE_CPU)
		data.cpu_entry.cpu = event->cpu;
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = csd->used;
		raw.frag.data = csd->data;
		raw.size = csd->used;
		data.raw = &raw;
	}

	overflow = perf_event_overflow(event, &data, &regs);
	debug_sprintf_event(cf_diag_dbg, 6,
			    "%s event %p cpu %d sample_type %#llx raw %d "
			    "ov %d\n", __func__, event, event->cpu,
			    event->attr.sample_type, raw.size, overflow);
	if (overflow)
		event->pmu->stop(event, 0);

	perf_event_update_userpage(event);
	return overflow;
}

static void cf_diag_start(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct hw_perf_event *hwc = &event->hw;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x hwc-state %#x\n",
			    __func__, event, event->cpu, flags, hwc->state);
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	/* (Re-)enable and activate all counter sets */
	lcctl(0);		/* Reset counter sets */
	hwc->state = 0;
	ctr_set_multiple_enable(&cpuhw->state, hwc->config_base);
	lcctl(cpuhw->state);	/* Enable counter sets */
	csd->used = cf_diag_getctr(csd->start, sizeof(csd->start),
				   event->hw.config_base);
	ctr_set_multiple_start(&cpuhw->state, hwc->config_base);
	/* Function cf_diag_enable() starts the counter sets. */
}

static void cf_diag_stop(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct hw_perf_event *hwc = &event->hw;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x hwc-state %#x\n",
			    __func__, event, event->cpu, flags, hwc->state);

	/* Deactivate all counter sets */
	ctr_set_multiple_stop(&cpuhw->state, hwc->config_base);
	local64_inc(&event->count);
	csd->used = cf_diag_getctr(csd->data, sizeof(csd->data),
				   event->hw.config_base);
	if (cf_diag_diffctr(csd, event->hw.config_base))
		cf_diag_push_sample(event, csd);
	hwc->state |= PERF_HES_STOPPED;
}

static int cf_diag_add(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err = 0;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x cpuhw %p\n",
			    __func__, event, event->cpu, flags, cpuhw);

	if (cpuhw->flags & PMU_F_IN_USE) {
		err = -EAGAIN;
		goto out;
	}

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	cpuhw->flags |= PMU_F_IN_USE;
	if (flags & PERF_EF_START)
		cf_diag_start(event, PERF_EF_RELOAD);
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
	return err;
}

static void cf_diag_del(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x\n",
			    __func__, event, event->cpu, flags);

	cf_diag_stop(event, PERF_EF_UPDATE);
	ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base);
	ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base);
	cpuhw->flags &= ~PMU_F_IN_USE;
}

/* Default counter set events and format attribute groups */

CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);

static struct attribute *cf_diag_events_attr[] = {
	CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG),
	NULL,
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *cf_diag_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group cf_diag_events_group = {
	.name = "events",
	.attrs = cf_diag_events_attr,
};
static struct attribute_group cf_diag_format_group = {
	.name = "format",
	.attrs = cf_diag_format_attr,
};
static const struct attribute_group *cf_diag_attr_groups[] = {
	&cf_diag_events_group,
	&cf_diag_format_group,
	NULL,
};

/* Performance monitoring unit for s390x */
static struct pmu cf_diag = {
	.task_ctx_nr  = perf_sw_context,
	.pmu_enable   = cf_diag_enable,
	.pmu_disable  = cf_diag_disable,
	.event_init   = cf_diag_event_init,
	.add	      = cf_diag_add,
	.del	      = cf_diag_del,
	.start	      = cf_diag_start,
	.stop	      = cf_diag_stop,
	.read	      = cf_diag_read,
	.attr_groups  = cf_diag_attr_groups
};

/* Get the CPU speed, try sampling facility first and CPU attributes second. */
static void cf_diag_get_cpu_speed(void)
{
	if (cpum_sf_avail()) {			/* Sampling facility first */
		struct hws_qsi_info_block si;

		memset(&si, 0, sizeof(si));
		if (!qsi(&si)) {
			cf_diag_cpu_speed = si.cpu_speed;
			return;
		}
	}

	if (test_facility(34)) {	/* CPU speed extract static part */
		unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);

		if (mhz != -1UL)
			cf_diag_cpu_speed = mhz & 0xffffffff;
	}
}

/* Code to create device and file I/O operations */
static atomic_t ctrset_opencnt = ATOMIC_INIT(0);	/* Excl. access */

static int cf_diag_open(struct inode *inode, struct file *file)
{
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (atomic_xchg(&ctrset_opencnt, 1))
		return -EBUSY;

	/* Avoid concurrent access with perf_event_open() system call */
	mutex_lock(&cf_diag_reserve_mutex);
	if (atomic_read(&cf_diag_events) || __kernel_cpumcf_begin())
		err = -EBUSY;
	mutex_unlock(&cf_diag_reserve_mutex);
	if (err) {
		atomic_set(&ctrset_opencnt, 0);
		return err;
	}
	file->private_data = NULL;
	debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
	/* nonseekable_open() never fails */
	return nonseekable_open(inode, file);
}

/* Variables for ioctl() interface support */
static DEFINE_MUTEX(cf_diag_ctrset_mutex);
static struct cf_diag_ctrset {
	unsigned long ctrset;		/* Bit mask of counter set to read */
	cpumask_t mask;			/* CPU mask to read from */
	time64_t lastread;		/* Epoch counter set last read */
} cf_diag_ctrset;

static void cf_diag_ctrset_clear(void)
{
	cpumask_clear(&cf_diag_ctrset.mask);
	cf_diag_ctrset.ctrset = 0;
}

static void cf_diag_release_cpu(void *p)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	debug_sprintf_event(cf_diag_dbg, 3, "%s cpu %d\n", __func__,
			    smp_processor_id());
	lcctl(0);		/* Reset counter sets */
	cpuhw->state = 0;	/* Save state in CPU hardware state */
}

/* Release function is also called when the application gets terminated
 * without doing a proper ioctl(..., S390_HWCTR_STOP, ...) command.
 * Since only one application is allowed to open the device, simply stop
 * all counter sets.
 */
static int cf_diag_release(struct inode *inode, struct file *file)
{
	on_each_cpu(cf_diag_release_cpu, NULL, 1);
	cf_diag_ctrset_clear();
	atomic_set(&ctrset_opencnt, 0);
	__kernel_cpumcf_end();
	debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
	return 0;
}

struct cf_diag_call_on_cpu_parm {	/* Parm struct for smp_call_on_cpu */
	unsigned int sets;		/* Counter set bit mask */
	atomic_t cpus_ack;		/* # CPUs successfully executed func */
};

static int cf_diag_all_copy(unsigned long arg, cpumask_t *mask)
{
	struct s390_ctrset_read __user *ctrset_read;
	unsigned int cpu, cpus, rc;
	void __user *uptr;

	ctrset_read = (struct s390_ctrset_read __user *)arg;
	uptr = ctrset_read->data;
	for_each_cpu(cpu, mask) {
		struct cf_diag_csd *csd = per_cpu_ptr(&cf_diag_csd, cpu);
		struct s390_ctrset_cpudata __user *ctrset_cpudata;

		ctrset_cpudata = uptr;
		debug_sprintf_event(cf_diag_dbg, 5, "%s cpu %d used %zd\n",
				    __func__, cpu, csd->used);
		rc = put_user(cpu, &ctrset_cpudata->cpu_nr);
		rc |= put_user(csd->sets, &ctrset_cpudata->no_sets);
		rc |= copy_to_user(ctrset_cpudata->data, csd->data, csd->used);
		if (rc)
			return -EFAULT;
		uptr += sizeof(struct s390_ctrset_cpudata) + csd->used;
		cond_resched();		/* Allow other tasks to run */
	}
	cpus = cpumask_weight(mask);
	if (put_user(cpus, &ctrset_read->no_cpus))
		return -EFAULT;
	debug_sprintf_event(cf_diag_dbg, 5, "%s copied %ld\n",
			    __func__, uptr - (void __user *)ctrset_read->data);
	return 0;
}

static size_t cf_diag_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
				  int ctrset_size, size_t room)
{
	size_t need = 0;
	int rc = -1;

	need = sizeof(*p) + sizeof(u64) * ctrset_size;
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s room %zd need %zd set %#x set_size %d\n",
			    __func__, room, need, ctrset, ctrset_size);
	if (need <= room) {
		p->set = cpumf_ctr_ctl[ctrset];
		p->no_cnts = ctrset_size;
		rc = ctr_stcctm(ctrset, ctrset_size, (u64 *)p->cv);
		if (rc == 3)		/* Nothing stored */
			need = 0;
	}
	debug_sprintf_event(cf_diag_dbg, 5, "%s need %zd rc %d\n", __func__,
			    need, rc);
	return need;
}

/* Read all counter sets. Since the perf_event_open() system call with
 * event cpum_cf_diag/.../ is blocked when this interface is active, reuse
 * the perf_event_open() data buffer to store the counter sets.
 */
static void cf_diag_cpu_read(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int set, set_size;
	size_t space;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);
	/* No data saved yet */
	csd->used = 0;
	csd->sets = 0;
	memset(csd->data, 0, sizeof(csd->data));

	/* Scan the counter sets */
	for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) {
		struct s390_ctrset_setdata *sp = (void *)csd->data + csd->used;

		if (!(p->sets & cpumf_ctr_ctl[set]))
			continue;	/* Counter set not in list */
		set_size = cf_diag_ctrset_size(set, &cpuhw->info);
		space = sizeof(csd->data) - csd->used;
		space = cf_diag_cpuset_read(sp, set, set_size, space);
		if (space) {
			csd->used += space;
			csd->sets += 1;
		}
		debug_sprintf_event(cf_diag_dbg, 5, "%s sp %px space %zd\n",
				    __func__, sp, space);
	}
	debug_sprintf_event(cf_diag_dbg, 5, "%s sets %d used %zd\n", __func__,
			    csd->sets, csd->used);
}

static int cf_diag_all_read(unsigned long arg)
{
	struct cf_diag_call_on_cpu_parm p;
	cpumask_var_t mask;
	time64_t now;
	int rc = 0;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	now = ktime_get_seconds();
	if (cf_diag_ctrset.lastread + cf_diag_interval > now) {
		debug_sprintf_event(cf_diag_dbg, 5, "%s now %lld "
				    " lastread %lld\n", __func__, now,
				    cf_diag_ctrset.lastread);
		rc = -EAGAIN;
		goto out;
	} else {
		cf_diag_ctrset.lastread = now;
	}
	p.sets = cf_diag_ctrset.ctrset;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
	rc = cf_diag_all_copy(arg, mask);
out:
	free_cpumask_var(mask);
	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
	return rc;
}

/* Stop all counter sets via ioctl interface */
static void cf_diag_ioctl_off(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);

	ctr_set_multiple_disable(&cpuhw->state, p->sets);
	ctr_set_multiple_stop(&cpuhw->state, p->sets);
	rc = lcctl(cpuhw->state);	/* Stop counter sets */
	if (!cpuhw->state)
		cpuhw->flags &= ~PMU_F_IN_USE;
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s rc %d flags %#x state %#llx\n", __func__,
			    rc, cpuhw->flags, cpuhw->state);
}

/* Start counter sets on particular CPU */
static void cf_diag_ioctl_on(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);

	if (!(cpuhw->flags & PMU_F_IN_USE))
		cpuhw->state = 0;
	cpuhw->flags |= PMU_F_IN_USE;
	rc = lcctl(cpuhw->state);	/* Reset unused counter sets */
	ctr_set_multiple_enable(&cpuhw->state, p->sets);
	ctr_set_multiple_start(&cpuhw->state, p->sets);
	rc |= lcctl(cpuhw->state);	/* Start counter sets */
	if (!rc)
		atomic_inc(&p->cpus_ack);
	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d state %#llx\n",
			    __func__, rc, cpuhw->state);
}

static int cf_diag_all_stop(void)
{
	struct cf_diag_call_on_cpu_parm p = {
		.sets = cf_diag_ctrset.ctrset,
	};
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
	free_cpumask_var(mask);
	return 0;
}

static int cf_diag_all_start(void)
{
	struct cf_diag_call_on_cpu_parm p = {
		.sets = cf_diag_ctrset.ctrset,
		.cpus_ack = ATOMIC_INIT(0),
	};
	cpumask_var_t mask;
	int rc = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_ioctl_on, &p, 1);
	if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
		on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
		rc = -EIO;
	}
	free_cpumask_var(mask);
	return rc;
}

/* Return the maximum required space for all possible CPUs in case one
 * CPU will be onlined during the START, READ, STOP cycles.
 * To find out the size of the counter sets, any one CPU will do. They
 * all have the same counter sets.
 */
static size_t cf_diag_needspace(unsigned int sets)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	size_t bytes = 0;
	int i;

	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		if (!(sets & cpumf_ctr_ctl[i]))
			continue;
		bytes += cf_diag_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
			 sizeof(((struct s390_ctrset_setdata *)0)->set) +
			 sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
	}
	bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
		(bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
			 sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
	debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
			    bytes);
	return bytes;
}

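/* A worked example of the formula above, assuming the uapi layout (two
 * 32-bit fields per counter set header, two 32-bit fields per CPU entry,
 * and an 8-byte no_cpus field): with nr_cpu_ids == 8 and only the basic
 * counter set (6 counters) selected, the result is
 * 8 + 8 * (6 * 8 + 8 + 8) = 520 bytes.
 */
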
static long cf_diag_ioctl_read(unsigned long arg)
{
	struct s390_ctrset_read read;
	int ret = 0;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
		return -EFAULT;
	ret = cf_diag_all_read(arg);
	debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
	return ret;
}

static long cf_diag_ioctl_stop(void)
{
	int ret;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	ret = cf_diag_all_stop();
	cf_diag_ctrset_clear();
	debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
	return ret;
}

static long cf_diag_ioctl_start(unsigned long arg)
{
	struct s390_ctrset_start __user *ustart;
	struct s390_ctrset_start start;
	void __user *umask;
	unsigned int len;
	int ret = 0;
	size_t need;

	if (cf_diag_ctrset.ctrset)
		return -EBUSY;
	ustart = (struct s390_ctrset_start __user *)arg;
	if (copy_from_user(&start, ustart, sizeof(start)))
		return -EFAULT;
	if (start.version != S390_HWCTR_START_VERSION)
		return -EINVAL;
	if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_USER] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]))
		return -EINVAL;		/* Invalid counter set */
	if (!start.counter_sets)
		return -EINVAL;		/* No counter set at all? */
	cpumask_clear(&cf_diag_ctrset.mask);
	len = min_t(u64, start.cpumask_len, cpumask_size());
	umask = (void __user *)start.cpumask;
	if (copy_from_user(&cf_diag_ctrset.mask, umask, len))
		return -EFAULT;
	if (cpumask_empty(&cf_diag_ctrset.mask))
		return -EINVAL;
	need = cf_diag_needspace(start.counter_sets);
	if (put_user(need, &ustart->data_bytes))
		ret = -EFAULT;
	if (ret)
		goto out;
	cf_diag_ctrset.ctrset = start.counter_sets;
	ret = cf_diag_all_start();
out:
	if (ret)
		cf_diag_ctrset_clear();
	debug_sprintf_event(cf_diag_dbg, 2, "%s sets %#lx need %ld ret %d\n",
			    __func__, cf_diag_ctrset.ctrset, need, ret);
	return ret;
}

static long cf_diag_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	int ret;

	debug_sprintf_event(cf_diag_dbg, 2, "%s cmd %#x arg %lx\n", __func__,
			    cmd, arg);
	mutex_lock(&cf_diag_ctrset_mutex);
	switch (cmd) {
	case S390_HWCTR_START:
		ret = cf_diag_ioctl_start(arg);
		break;
	case S390_HWCTR_STOP:
		ret = cf_diag_ioctl_stop();
		break;
	case S390_HWCTR_READ:
		ret = cf_diag_ioctl_read(arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&cf_diag_ctrset_mutex);

	debug_sprintf_event(cf_diag_dbg, 2, "%s ret %d\n", __func__, ret);
	return ret;
}

static const struct file_operations cf_diag_fops = {
	.owner = THIS_MODULE,
	.open = cf_diag_open,
	.release = cf_diag_release,
	.unlocked_ioctl	= cf_diag_ioctl,
	.compat_ioctl = cf_diag_ioctl,
	.llseek = no_llseek
};

static struct miscdevice cf_diag_dev = {
	.name	= S390_HWCTR_DEVICE,
	.minor	= MISC_DYNAMIC_MINOR,
	.fops	= &cf_diag_fops,
};

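/* A user-space sketch (not part of this file) of driving the ioctl
 * interface above. Types and constants come from the uapi header; sizing
 * the read buffer from data_bytes, which S390_HWCTR_START fills in (see
 * cf_diag_ioctl_start()), is the intended protocol. Error handling and
 * the mask setup values are omitted as assumptions:
 *
 *	int fd = open("/dev/" S390_HWCTR_DEVICE, O_RDONLY);
 *	struct s390_ctrset_start start = {
 *		.version = S390_HWCTR_START_VERSION,
 *		.counter_sets = ...,	// subset of the five counter sets
 *		.cpumask_len = ...,	// length of the CPU bit mask in bytes
 *		.cpumask = ...,		// pointer to the CPU bit mask
 *	};
 *
 *	ioctl(fd, S390_HWCTR_START, &start);	// fills start.data_bytes
 *	struct s390_ctrset_read *buf = malloc(start.data_bytes);
 *	ioctl(fd, S390_HWCTR_READ, buf);	// rate-limited: at most one
 *						// read per cf_diag_interval
 *	ioctl(fd, S390_HWCTR_STOP, 0);
 *	close(fd);
 */
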
static int cf_diag_online_cpu(unsigned int cpu)
{
	struct cf_diag_call_on_cpu_parm p;

	mutex_lock(&cf_diag_ctrset_mutex);
	if (!cf_diag_ctrset.ctrset)
		goto out;
	p.sets = cf_diag_ctrset.ctrset;
	cf_diag_ioctl_on(&p);
out:
	mutex_unlock(&cf_diag_ctrset_mutex);
	return 0;
}

static int cf_diag_offline_cpu(unsigned int cpu)
{
	struct cf_diag_call_on_cpu_parm p;

	mutex_lock(&cf_diag_ctrset_mutex);
	if (!cf_diag_ctrset.ctrset)
		goto out;
	p.sets = cf_diag_ctrset.ctrset;
	cf_diag_ioctl_off(&p);
out:
	mutex_unlock(&cf_diag_ctrset_mutex);
	return 0;
}

/* Initialize the counter set PMU to generate complete counter set data as
 * event raw data. This relies on the CPU Measurement Counter Facility device
 * already being loaded and initialized.
 */
static int __init cf_diag_init(void)
{
	struct cpumf_ctr_info info;
	size_t need;
	int rc;

	if (!kernel_cpumcf_avail() || !stccm_avail() || qctri(&info))
		return -ENODEV;
	cf_diag_get_cpu_speed();

	/* Make sure the counter set data fits into predefined buffer. */
	need = cf_diag_ctrset_maxsize(&info);
	if (need > sizeof(((struct cf_diag_csd *)0)->start)) {
		pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
		       need);
		return -ENOMEM;
	}

	rc = misc_register(&cf_diag_dev);
	if (rc) {
		pr_err("Registration of /dev/" S390_HWCTR_DEVICE
		       " failed rc=%d\n", rc);
		goto out;
	}

	/* Setup s390dbf facility */
	cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
	if (!cf_diag_dbg) {
		pr_err("Registration of s390dbf(cpum_cf_diag) failed\n");
		rc = -ENOMEM;
		goto out_dbf;
	}
	debug_register_view(cf_diag_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
	if (rc) {
		pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
		       rc);
		goto out_perf;
	}
	rc = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_S390_CFD_ONLINE,
				       "perf/s390/cfd:online",
				       cf_diag_online_cpu, cf_diag_offline_cpu);
	if (!rc)
		goto out;

	pr_err("Registration of CPUHP_AP_PERF_S390_CFD_ONLINE failed rc=%i\n",
	       rc);
	perf_pmu_unregister(&cf_diag);
out_perf:
	debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
	debug_unregister(cf_diag_dbg);
out_dbf:
	misc_deregister(&cf_diag_dev);
out:
	return rc;
}
device_initcall(cf_diag_init);