/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */

/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */

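/*
 * For illustration, a minimal userspace sketch of opening an OA stream via
 * DRM_IOCTL_I915_PERF_OPEN (uapi from include/uapi/drm/i915_drm.h); the
 * metrics_set_id here is a placeholder that would be discovered via sysfs:
 *
 *	__u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd is then read() for a stream of records, each prefixed by a
 * struct drm_i915_perf_record_header.
 */
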
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; a
 * perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics, these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality: we can't time-share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer and lose metrics.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seems to make
 *   our locking requirements somewhat complex as we handle the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

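/*
 * E.g. with the 16M buffer (mask 0xffffff), a tail of 0x100 that has wrapped
 * past a head of 0xffff00 gives OA_TAKEN(0x100, 0xffff00) =
 * (0x100 - 0xffff00) & 0xffffff = 0x200 bytes available, so the modular
 * arithmetic handles wraparound without a separate wrapped/unwrapped case.
 */
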
/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering EPOLLIN events)
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */

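/*
 * As a rough illustration of the aging scheme (not taken from a trace), with
 * the hrtimer firing every 5ms and a 100us margin:
 *
 *	t=0ms:  hw_tail read as 0x1000 -> tails[aging] = 0x1000, timestamped
 *	t=5ms:  0x1000 is now more than 100us old -> tails[aged] = 0x1000,
 *	        and a newer hw_tail can immediately start aging
 *	t=5ms+: read()s may consume reports in [head, 0x1000) while the
 *	        still-aging tail is left alone
 */
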
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31

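/*
 * The exponent effectively selects a timestamp bit to trigger on, so the
 * periodic sampling period works out as 2^(exponent + 1) timestamp ticks.
 * E.g. assuming a 12.5MHz timestamp frequency, OA_EXPONENT_MAX (31) gives
 * 2^32 / 12.5e6 ~= 343s between reports, while the hardware maximum of 63
 * would give 2^64 / 12.5e6 ticks ~= 47 thousand years, as noted above.
 */
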
#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25Mhz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT	(1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset if there is currently
	 * a read() in progress.
	 */
	head = stream->oa_buffer.head;

	aged_idx = stream->oa_buffer.aged_tail_idx;
	aged_tail = stream->oa_buffer.tails[aged_idx].offset;
	aging_tail = stream->oa_buffer.tails[!aged_idx].offset;

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - stream->oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		stream->oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = stream->oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			stream->oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			stream->oa_buffer.aging_timestamp = now;
		} else {
			drm_err(&stream->perf->i915->drm,
				"Ignoring spurious out of range OA buffer tail pointer = %x\n",
				hw_tail);
		}
	}

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
		false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

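/*
 * On the userspace side a read() of the stream fd then sees a sequence of
 * such records; a minimal (hypothetical) consumer loop might look like:
 *
 *	u8 buf[SZ_128K];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (ssize_t offset = 0; offset < len; ) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((const u8 *)(header + 1));
 *
 *		offset += header->size;
 *	}
 *
 * where process_oa_report() stands in for format-specific decoding of the
 * raw OA report that follows the header when SAMPLE_OA is requested.
 */
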
/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	aged_tail_idx = stream->oa_buffer.aged_tail_idx;
	tail = stream->oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;

	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (IS_GEN(stream->perf->i915, 12) ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));
		if (reason == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    INTEL_GEN(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs be forwarded bookend context-switch reports so that it
		 * can track switches in between MI_REPORT_PERF_COUNT commands
		 * and can itself subtract/ignore the progress of counters
		 * associated with other contexts. Note that the hardware
		 * automatically triggers reports when switching to a new
		 * context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not-uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = IS_GEN(stream->perf->i915, 12) ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		intel_uncore_write(uncore, oastatus_reg,
				   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	aged_tail_idx = stream->oa_buffer.aged_tail_idx;
	tail = stream->oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;

	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	int err;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 */
		err = intel_context_pin(ce);
		if (err == 0) {
			stream->pinned_ctx = ce;
			break;
		}
	}
	i915_gem_context_unlock_engines(ctx);

	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (INTEL_GEN(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
	case 10:
		if (intel_engine_in_execlists_submission_mode(ce->engine)) {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		} else {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id =
				lower_32_bits(ce->lrc_desc) >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		}
		break;

	case 11:
	case 12: {
		stream->specific_ctx_id_mask =
			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		/*
		 * Pick an unused context id
		 * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
		 */
		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
		break;
	}

	default:
		MISSING_CASE(INTEL_GEN(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	BUG_ON(stream != perf->exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	stream->pollin = false;
}

static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	stream->pollin = false;
}

static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
	stream->oa_buffer.head = gtt_offset;

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0,
	       stream->oa_buffer.vma->size);

	stream->pollin = false;
}

static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
		return -ENODEV;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}
	stream->oa_buffer.vma = vma;

	stream->oa_buffer.vaddr =
		i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(stream->oa_buffer.vaddr)) {
		ret = PTR_ERR(stream->oa_buffer.vaddr);
		goto err_unpin;
	}

	return 0;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	stream->oa_buffer.vaddr = NULL;
	stream->oa_buffer.vma = NULL;

	return ret;
}

static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
				  bool save, i915_reg_t reg, u32 offset,
				  u32 dword_count)
{
	u32 cmd;
	u32 d;

	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
	if (INTEL_GEN(stream->perf->i915) >= 8)
		cmd++;

	for (d = 0; d < dword_count; d++) {
		*cs++ = cmd;
		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
						offset) + 4 * d;
		*cs++ = 0;
	}

	return cs;
}

1646 static int alloc_noa_wait(struct i915_perf_stream *stream)
1648 struct drm_i915_private *i915 = stream->perf->i915;
1649 struct drm_i915_gem_object *bo;
1650 struct i915_vma *vma;
1651 const u64 delay_ticks = 0xffffffffffffffff -
1653 atomic64_read(&stream->perf->noa_programming_delay) *
1654 RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
1656 const u32 base = stream->engine->mmio_base;
1657 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1658 u32 *batch, *ts0, *cs, *jump;
1669 bo = i915_gem_object_create_internal(i915, 4096);
1672 "Failed to allocate NOA wait batchbuffer\n");
1677 * We pin in GGTT because we jump into this buffer now because
1678 * multiple OA config BOs will have a jump to this address and it
1679 * needs to be fixed during the lifetime of the i915/perf stream.
1681 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
1687 batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1688 if (IS_ERR(batch)) {
1689 ret = PTR_ERR(batch);
1693 /* Save registers. */
1694 for (i = 0; i < N_CS_GPR; i++)
1695 cs = save_restore_register(
1696 stream, cs, true /* save */, CS_GPR(i),
1697 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1698 cs = save_restore_register(
1699 stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
1700 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1702 /* First timestamp snapshot location. */
1706 * Initial snapshot of the timestamp register to implement the wait.
1707 * We work with 32b values, so clear out the top 32b bits of the
1708 * register because the ALU works 64bits.
1710 *cs++ = MI_LOAD_REGISTER_IMM(1);
1711 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1713 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1714 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1715 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1718 * This is the location we're going to jump back to until the
1719 * required amount of time has passed.
1724 * Take another snapshot of the timestamp register. Take care to clear
1725 * out the top 32 bits of CS_GPR(NOW_TS) as we're using it for other
1728 *cs++ = MI_LOAD_REGISTER_IMM(1);
1729 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1731 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1732 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1733 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1736 * Do a diff between the 2 timestamps and store the result back into
1740 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1741 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1742 *cs++ = MI_MATH_SUB;
1743 *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1744 *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1747 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1748 * timestamps have rolled over the 32 bits) into the predicate register
1749 * to be used for the predicated jump.
1751 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1752 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1753 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1755 /* Restart from the beginning if we had timestamps roll over. */
1756 *cs++ = (INTEL_GEN(i915) < 8 ?
1757 MI_BATCH_BUFFER_START :
1758 MI_BATCH_BUFFER_START_GEN8) |
1760 *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1764 * Now take the diff between the two previous timestamps and add it
1765 * to delay_ticks, i.e. ((1 << 64) - 1) - the delay in timestamp ticks.
1767 * When the Carry Flag contains 1 this means the elapsed time is
1768 * longer than the expected delay, and we can exit the wait loop.
1770 *cs++ = MI_LOAD_REGISTER_IMM(2);
1771 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1772 *cs++ = lower_32_bits(delay_ticks);
1773 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1774 *cs++ = upper_32_bits(delay_ticks);
1777 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1778 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1779 *cs++ = MI_MATH_ADD;
1780 *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1782 *cs++ = MI_ARB_CHECK;
1785 * Transfer the result into the predicate register to be used for the
1788 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1789 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1790 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1792 /* Predicate the jump. */
1793 *cs++ = (INTEL_GEN(i915) < 8 ?
1794 MI_BATCH_BUFFER_START :
1795 MI_BATCH_BUFFER_START_GEN8) |
1797 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1800 /* Restore registers. */
1801 for (i = 0; i < N_CS_GPR; i++)
1802 cs = save_restore_register(
1803 stream, cs, false /* restore */, CS_GPR(i),
1804 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1805 cs = save_restore_register(
1806 stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
1807 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1809 /* And return to the ring. */
1810 *cs++ = MI_BATCH_BUFFER_END;
1812 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
1814 i915_gem_object_flush_map(bo);
1815 i915_gem_object_unpin_map(bo);
1817 stream->noa_wait = vma;
1821 i915_vma_unpin_and_release(&vma, 0);
1823 i915_gem_object_put(bo);
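/*
 * For illustration only: the wait batch assembled above behaves roughly
 * like the following C sketch (helper names here are descriptive, not
 * real functions):
 *
 *	u64 start, now, delta;
 * restart:
 *	start = read_cs_timestamp();		// 32b snapshot into START_TS
 * retry:
 *	now = read_cs_timestamp();		// 32b snapshot into NOW_TS
 *	delta = now - start;			// MI_MATH SUB, CF on underflow
 *	if (now < start)			// timestamp rolled over:
 *		goto restart;			// take a fresh start snapshot
 *	if (delta + delay_ticks <= U64_MAX)	// no carry: delay not yet
 *		goto retry;			// elapsed, keep spinning
 *	// carry set: enough time has passed, fall through to BB_END
 */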
1827 static u32 *write_cs_mi_lri(u32 *cs,
1828 const struct i915_oa_reg *reg_data,
1833 for (i = 0; i < n_regs; i++) {
1834 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
1835 u32 n_lri = min_t(u32,
1837 MI_LOAD_REGISTER_IMM_MAX_REGS);
1839 *cs++ = MI_LOAD_REGISTER_IMM(n_lri);
1841 *cs++ = i915_mmio_reg_offset(reg_data[i].addr);
1842 *cs++ = reg_data[i].value;
1848 static int num_lri_dwords(int num_regs)
1853 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
1854 count += num_regs * 2;
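/*
 * Worked example, assuming MI_LOAD_REGISTER_IMM_MAX_REGS is 126 (its
 * usual definition): 200 registers need DIV_ROUND_UP(200, 126) == 2
 * MI_LRI header dwords plus 200 * 2 == 400 (offset, value) dwords,
 * i.e. 402 dwords in total.
 */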
1860 static struct i915_oa_config_bo *
1861 alloc_oa_config_buffer(struct i915_perf_stream *stream,
1862 struct i915_oa_config *oa_config)
1864 struct drm_i915_gem_object *obj;
1865 struct i915_oa_config_bo *oa_bo;
1866 size_t config_length = 0;
1870 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
1872 return ERR_PTR(-ENOMEM);
1874 config_length += num_lri_dwords(oa_config->mux_regs_len);
1875 config_length += num_lri_dwords(oa_config->b_counter_regs_len);
1876 config_length += num_lri_dwords(oa_config->flex_regs_len);
1877 config_length += 3; /* MI_BATCH_BUFFER_START */
1878 config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
1880 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
1886 cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
1892 cs = write_cs_mi_lri(cs,
1893 oa_config->mux_regs,
1894 oa_config->mux_regs_len);
1895 cs = write_cs_mi_lri(cs,
1896 oa_config->b_counter_regs,
1897 oa_config->b_counter_regs_len);
1898 cs = write_cs_mi_lri(cs,
1899 oa_config->flex_regs,
1900 oa_config->flex_regs_len);
1902 /* Jump into the active wait. */
1903 *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
1904 MI_BATCH_BUFFER_START :
1905 MI_BATCH_BUFFER_START_GEN8);
1906 *cs++ = i915_ggtt_offset(stream->noa_wait);
1909 i915_gem_object_flush_map(obj);
1910 i915_gem_object_unpin_map(obj);
1912 oa_bo->vma = i915_vma_instance(obj,
1913 &stream->engine->gt->ggtt->vm,
1915 if (IS_ERR(oa_bo->vma)) {
1916 err = PTR_ERR(oa_bo->vma);
1920 oa_bo->oa_config = i915_oa_config_get(oa_config);
1921 llist_add(&oa_bo->node, &stream->oa_config_bos);
1926 i915_gem_object_put(obj);
1929 return ERR_PTR(err);
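/*
 * Schematically, the config BO built above contains:
 *
 *	+-----------------------------------------------+
 *	| MI_LOAD_REGISTER_IMM packets: mux_regs         |
 *	| MI_LOAD_REGISTER_IMM packets: b_counter_regs   |
 *	| MI_LOAD_REGISTER_IMM packets: flex_regs        |
 *	| MI_BATCH_BUFFER_START  ->  stream->noa_wait    |
 *	+-----------------------------------------------+
 *
 * so executing the config batch chains directly into the NOA wait.
 */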
1932 static struct i915_vma *
1933 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
1935 struct i915_oa_config_bo *oa_bo;
1938 * Look for the buffer in the already allocated BOs attached
1941 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
1942 if (oa_bo->oa_config == oa_config &&
1943 memcmp(oa_bo->oa_config->uuid,
1945 sizeof(oa_config->uuid)) == 0)
1949 oa_bo = alloc_oa_config_buffer(stream, oa_config);
1951 return ERR_CAST(oa_bo);
1954 return i915_vma_get(oa_bo->vma);
1957 static struct i915_request *
1958 emit_oa_config(struct i915_perf_stream *stream,
1959 struct i915_oa_config *oa_config,
1960 struct intel_context *ce)
1962 struct i915_request *rq;
1963 struct i915_vma *vma;
1966 vma = get_oa_vma(stream, oa_config);
1968 return ERR_CAST(vma);
1970 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1974 intel_engine_pm_get(ce->engine);
1975 rq = i915_request_create(ce);
1976 intel_engine_pm_put(ce->engine);
1983 err = i915_request_await_object(rq, vma->obj, 0);
1985 err = i915_vma_move_to_active(vma, rq, 0);
1986 i915_vma_unlock(vma);
1988 goto err_add_request;
1990 err = rq->engine->emit_bb_start(rq,
1992 I915_DISPATCH_SECURE);
1994 goto err_add_request;
1996 i915_request_get(rq);
1998 i915_request_add(rq);
2000 i915_vma_unpin(vma);
2003 return err ? ERR_PTR(err) : rq;
2006 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2008 return stream->pinned_ctx ?: stream->engine->kernel_context;
2011 static struct i915_request *
2012 hsw_enable_metric_set(struct i915_perf_stream *stream)
2014 struct intel_uncore *uncore = stream->uncore;
2019 * OA unit is using “crclk” for its functionality. When trunk
2020 * level clock gating takes place, OA clock would be gated,
2021 * unable to count the events from non-render clock domain.
2022 * Render clock gating must be disabled when OA is enabled to
2023 * count the events from non-render domain. Unit level clock
2024 * gating for RCS should also be disabled.
2026 intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2027 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2028 intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2029 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2031 return emit_oa_config(stream, stream->oa_config, oa_context(stream));
2034 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2036 struct intel_uncore *uncore = stream->uncore;
2038 intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2039 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2040 intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2041 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2043 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2046 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2049 u32 mmio = i915_mmio_reg_offset(reg);
2053 * This arbitrary default will select the 'EU FPU0 Pipeline
2054 * Active' event. In the future it's anticipated that there
2055 * will be an explicit 'No Event' we can select, but not yet...
2060 for (i = 0; i < oa_config->flex_regs_len; i++) {
2061 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2062 return oa_config->flex_regs[i].value;
2068 * NB: It must always remain pointer safe to run this even if the OA unit
2069 * has been disabled.
2071 * It's fine to put out-of-date values into these per-context registers
2072 * in the case that the OA unit has been disabled.
2075 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2076 const struct i915_perf_stream *stream)
2078 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2079 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2080 /* The MMIO offsets for Flex EU registers aren't contiguous */
2081 i915_reg_t flex_regs[] = {
2090 u32 *reg_state = ce->lrc_reg_state;
2093 reg_state[ctx_oactxctrl + 1] =
2094 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2095 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2096 GEN8_OA_COUNTER_RESUME;
2098 for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2099 reg_state[ctx_flexeu0 + i * 2 + 1] =
2100 oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2110 gen8_store_flex(struct i915_request *rq,
2111 struct intel_context *ce,
2112 const struct flex *flex, unsigned int count)
2117 cs = intel_ring_begin(rq, 4 * count);
2121 offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
2123 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2124 *cs++ = offset + flex->offset * sizeof(u32);
2126 *cs++ = flex->value;
2127 } while (flex++, --count);
2129 intel_ring_advance(rq, cs);
2135 gen8_load_flex(struct i915_request *rq,
2136 struct intel_context *ce,
2137 const struct flex *flex, unsigned int count)
2141 GEM_BUG_ON(!count || count > 63);
2143 cs = intel_ring_begin(rq, 2 * count + 2);
2147 *cs++ = MI_LOAD_REGISTER_IMM(count);
2149 *cs++ = i915_mmio_reg_offset(flex->reg);
2150 *cs++ = flex->value;
2151 } while (flex++, --count);
2154 intel_ring_advance(rq, cs);
2159 static int gen8_modify_context(struct intel_context *ce,
2160 const struct flex *flex, unsigned int count)
2162 struct i915_request *rq;
2165 rq = intel_engine_create_kernel_request(ce->engine);
2169 /* Serialise with the remote context */
2170 err = intel_context_prepare_remote_request(ce, rq);
2172 err = gen8_store_flex(rq, ce, flex, count);
2174 i915_request_add(rq);
2178 static int gen8_modify_self(struct intel_context *ce,
2179 const struct flex *flex, unsigned int count)
2181 struct i915_request *rq;
2184 intel_engine_pm_get(ce->engine);
2185 rq = i915_request_create(ce);
2186 intel_engine_pm_put(ce->engine);
2190 err = gen8_load_flex(rq, ce, flex, count);
2192 i915_request_add(rq);
2196 static int gen8_configure_context(struct i915_gem_context *ctx,
2197 struct flex *flex, unsigned int count)
2199 struct i915_gem_engines_iter it;
2200 struct intel_context *ce;
2203 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2204 GEM_BUG_ON(ce == ce->engine->kernel_context);
2206 if (ce->engine->class != RENDER_CLASS)
2209 /* Otherwise OA settings will be set upon first use */
2210 if (!intel_context_pin_if_active(ce))
2213 flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
2214 err = gen8_modify_context(ce, flex, count);
2216 intel_context_unpin(ce);
2220 i915_gem_context_unlock_engines(ctx);
2225 static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
2228 struct intel_context *ce = stream->pinned_ctx;
2229 u32 format = stream->oa_buffer.format;
2230 struct flex regs_context[] = {
2233 stream->perf->ctx_oactxctrl_offset + 1,
2234 enable ? GEN8_OA_COUNTER_RESUME : 0,
2237 /* Offsets in regs_lri are not used since this configuration is only
2238 * applied using LRI. Initialize the correct offsets for posterity.
2240 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2241 struct flex regs_lri[] = {
2243 GEN12_OAR_OACONTROL,
2244 GEN12_OAR_OACONTROL_OFFSET + 1,
2245 (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2246 (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2249 RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2250 CTX_CONTEXT_CONTROL,
2251 _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2253 GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2258 /* Modify the context image of the pinned context with regs_context. */
2259 err = intel_context_lock_pinned(ce);
2263 err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
2264 intel_context_unlock_pinned(ce);
2268 /* Apply regs_lri using LRI with pinned context */
2269 return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
2273 * Manages updating the per-context aspects of the OA stream
2274 * configuration across all contexts.
2276 * The awkward consideration here is that OACTXCONTROL controls the
2277 * exponent for periodic sampling which is primarily used for system
2278 * wide profiling where we'd like a consistent sampling period even in
2279 * the face of context switches.
2281 * Our approach of updating the register state context (as opposed to
2282 * say using a workaround batch buffer) ensures that the hardware
2283 * won't automatically reload an out-of-date timer exponent even
2284 * transiently before a WA BB could be parsed.
2286 * This function needs to:
2287 * - Ensure the currently running context's per-context OA state is
2289 * - Ensure that all existing contexts will have the correct per-context
2290 * OA state if they are scheduled for use.
2291 * - Ensure any new contexts will be initialized with the correct
2292 * per-context OA state.
2294 * Note: it's only the RCS/Render context that has any OA state.
2295 * Note: the first flex register passed must always be R_PWR_CLK_STATE
2297 static int oa_configure_all_contexts(struct i915_perf_stream *stream,
2301 struct drm_i915_private *i915 = stream->perf->i915;
2302 struct intel_engine_cs *engine;
2303 struct i915_gem_context *ctx, *cn;
2306 lockdep_assert_held(&stream->perf->lock);
2309 * The OA register config is set up through the context image. This image
2310 * might be written to by the GPU on context switch (in particular on
2311 * lite-restore). This means we can't safely update a context's image,
2312 * if this context is scheduled/submitted to run on the GPU.
2314 * We could emit the OA register config through the batch buffer but
2315 * this might leave a small interval of time where the OA unit is
2316 * configured at an invalid sampling period.
2318 * Note that since we emit all requests from a single ring, there
2319 * is still an implicit global barrier here that may cause a high
2320 * priority context to wait for an otherwise independent low priority
2321 * context. Contexts idle at the time of reconfiguration are not
2322 * trapped behind the barrier.
2324 spin_lock(&i915->gem.contexts.lock);
2325 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2326 if (!kref_get_unless_zero(&ctx->ref))
2329 spin_unlock(&i915->gem.contexts.lock);
2331 err = gen8_configure_context(ctx, regs, num_regs);
2333 i915_gem_context_put(ctx);
2337 spin_lock(&i915->gem.contexts.lock);
2338 list_safe_reset_next(ctx, cn, link);
2339 i915_gem_context_put(ctx);
2341 spin_unlock(&i915->gem.contexts.lock);
2344 * After updating all other contexts, we need to modify ourselves.
2345 * If we don't modify the kernel_context, we do not get events while
2348 for_each_uabi_engine(engine, i915) {
2349 struct intel_context *ce = engine->kernel_context;
2351 if (engine->class != RENDER_CLASS)
2354 regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
2356 err = gen8_modify_self(ce, regs, num_regs);
2364 static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
2365 const struct i915_oa_config *oa_config)
2367 struct flex regs[] = {
2369 GEN8_R_PWR_CLK_STATE,
2370 CTX_R_PWR_CLK_STATE,
2374 return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
2377 static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
2378 const struct i915_oa_config *oa_config)
2380 /* The MMIO offsets for Flex EU registers aren't contiguous */
2381 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2382 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2383 struct flex regs[] = {
2385 GEN8_R_PWR_CLK_STATE,
2386 CTX_R_PWR_CLK_STATE,
2390 stream->perf->ctx_oactxctrl_offset + 1,
2392 { EU_PERF_CNTL0, ctx_flexeuN(0) },
2393 { EU_PERF_CNTL1, ctx_flexeuN(1) },
2394 { EU_PERF_CNTL2, ctx_flexeuN(2) },
2395 { EU_PERF_CNTL3, ctx_flexeuN(3) },
2396 { EU_PERF_CNTL4, ctx_flexeuN(4) },
2397 { EU_PERF_CNTL5, ctx_flexeuN(5) },
2398 { EU_PERF_CNTL6, ctx_flexeuN(6) },
2404 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2405 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2406 GEN8_OA_COUNTER_RESUME;
2408 for (i = 2; i < ARRAY_SIZE(regs); i++)
2409 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2411 return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
2414 static struct i915_request *
2415 gen8_enable_metric_set(struct i915_perf_stream *stream)
2417 struct intel_uncore *uncore = stream->uncore;
2418 struct i915_oa_config *oa_config = stream->oa_config;
2422 * We disable slice/unslice clock ratio change reports on SKL since
2423 * they are too noisy. The HW generates a lot of redundant reports
2424 * where the ratio hasn't really changed, causing a lot of redundant
2425 * work for userspace and increasing the chances we'll hit buffer
2428 * Although we don't currently use the 'disable overrun' OABUFFER
2429 * feature, it's worth noting that clock ratio reports have to be
2430 * disabled before considering use of that feature, since the HW doesn't
2431 * correctly block these reports.
2433 * Currently none of the high-level metrics we have depend on knowing
2434 * this ratio to normalize.
2436 * Note: This register is not power context saved and restored, but
2437 * that's OK considering that we disable RC6 while the OA unit is
2440 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2441 * be read back from automatically triggered reports, as part of the
2444 if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
2445 intel_uncore_write(uncore, GEN8_OA_DEBUG,
2446 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2447 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2451 * Update all contexts prior to writing the mux configurations, as we need
2452 * to make sure all slices/subslices are ON before writing to NOA
2455 ret = lrc_configure_all_contexts(stream, oa_config);
2457 return ERR_PTR(ret);
2459 return emit_oa_config(stream, oa_config, oa_context(stream));
2462 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2464 return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2465 (stream->sample_flags & SAMPLE_OA_REPORT) ?
2466 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2469 static struct i915_request *
2470 gen12_enable_metric_set(struct i915_perf_stream *stream)
2472 struct intel_uncore *uncore = stream->uncore;
2473 struct i915_oa_config *oa_config = stream->oa_config;
2474 bool periodic = stream->periodic;
2475 u32 period_exponent = stream->period_exponent;
2478 intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2479 /* Disable clk ratio reports, like previous Gens. */
2480 _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2481 GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2483 * If the user didn't require OA reports, instruct
2484 * the hardware not to emit ctx switch reports.
2486 oag_report_ctx_switches(stream));
2488 intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2489 (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2490 GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2491 (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2495 * Update all contexts prior to writing the mux configurations, as we need
2496 * to make sure all slices/subslices are ON before writing to NOA
2499 ret = gen12_configure_all_contexts(stream, oa_config);
2501 return ERR_PTR(ret);
2504 * For Gen12, performance counters are context
2505 * saved/restored. Only enable them for the context that
2509 ret = gen12_configure_oar_context(stream, true);
2511 return ERR_PTR(ret);
2514 return emit_oa_config(stream, oa_config, oa_context(stream));
2517 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2519 struct intel_uncore *uncore = stream->uncore;
2521 /* Reset all contexts' slices/subslices configurations. */
2522 lrc_configure_all_contexts(stream, NULL);
2524 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2527 static void gen10_disable_metric_set(struct i915_perf_stream *stream)
2529 struct intel_uncore *uncore = stream->uncore;
2531 /* Reset all contexts' slices/subslices configurations. */
2532 lrc_configure_all_contexts(stream, NULL);
2534 /* Make sure we disable noa to save power. */
2535 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2538 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2540 struct intel_uncore *uncore = stream->uncore;
2542 /* Reset all contexts' slices/subslices configurations. */
2543 gen12_configure_all_contexts(stream, NULL);
2545 /* disable the context save/restore or OAR counters */
2547 gen12_configure_oar_context(stream, false);
2549 /* Make sure we disable noa to save power. */
2550 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2553 static void gen7_oa_enable(struct i915_perf_stream *stream)
2555 struct intel_uncore *uncore = stream->uncore;
2556 struct i915_gem_context *ctx = stream->ctx;
2557 u32 ctx_id = stream->specific_ctx_id;
2558 bool periodic = stream->periodic;
2559 u32 period_exponent = stream->period_exponent;
2560 u32 report_format = stream->oa_buffer.format;
2563 * Reset buf pointers so we don't forward reports from before now.
2565 * Think carefully before trying to avoid this, since it
2566 * also ensures status flags and the buffer itself are cleared
2567 * in error paths, and we have checks for invalid reports based
2568 * on the assumption that certain fields are written to zeroed
2569 * memory, which this helps maintain.
2571 gen7_init_oa_buffer(stream);
2573 intel_uncore_write(uncore, GEN7_OACONTROL,
2574 (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2576 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2577 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2578 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2579 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2580 GEN7_OACONTROL_ENABLE);
2583 static void gen8_oa_enable(struct i915_perf_stream *stream)
2585 struct intel_uncore *uncore = stream->uncore;
2586 u32 report_format = stream->oa_buffer.format;
2589 * Reset buf pointers so we don't forward reports from before now.
2591 * Think carefully before trying to avoid this, since it
2592 * also ensures status flags and the buffer itself are cleared
2593 * in error paths, and we have checks for invalid reports based
2594 * on the assumption that certain fields are written to zeroed
2595 * memory, which this helps maintain.
2597 gen8_init_oa_buffer(stream);
2600 * Note: we don't rely on the hardware to perform single context
2601 * filtering and instead filter on the cpu based on the context-id
2604 intel_uncore_write(uncore, GEN8_OACONTROL,
2605 (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2606 GEN8_OA_COUNTER_ENABLE);
2609 static void gen12_oa_enable(struct i915_perf_stream *stream)
2611 struct intel_uncore *uncore = stream->uncore;
2612 u32 report_format = stream->oa_buffer.format;
2615 * If we don't want OA reports from the OA buffer, then we don't even
2616 * need to program the OAG unit.
2618 if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2621 gen12_init_oa_buffer(stream);
2623 intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2624 (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2625 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2629 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2630 * @stream: An i915 perf stream opened for OA metrics
2632 * [Re]enables hardware periodic sampling according to the period configured
2633 * when opening the stream. This also starts a hrtimer that will periodically
2634 * check for data in the circular OA buffer for notifying userspace (e.g.
2635 * during a read() or poll()).
2637 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2639 stream->perf->ops.oa_enable(stream);
2641 if (stream->periodic)
2642 hrtimer_start(&stream->poll_check_timer,
2643 ns_to_ktime(POLL_PERIOD),
2644 HRTIMER_MODE_REL_PINNED);
2647 static void gen7_oa_disable(struct i915_perf_stream *stream)
2649 struct intel_uncore *uncore = stream->uncore;
2651 intel_uncore_write(uncore, GEN7_OACONTROL, 0);
2652 if (intel_wait_for_register(uncore,
2653 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2655 drm_err(&stream->perf->i915->drm,
2656 "wait for OA to be disabled timed out\n");
2659 static void gen8_oa_disable(struct i915_perf_stream *stream)
2661 struct intel_uncore *uncore = stream->uncore;
2663 intel_uncore_write(uncore, GEN8_OACONTROL, 0);
2664 if (intel_wait_for_register(uncore,
2665 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2667 drm_err(&stream->perf->i915->drm,
2668 "wait for OA to be disabled timed out\n");
2671 static void gen12_oa_disable(struct i915_perf_stream *stream)
2673 struct intel_uncore *uncore = stream->uncore;
2675 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
2676 if (intel_wait_for_register(uncore,
2677 GEN12_OAG_OACONTROL,
2678 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
2680 drm_err(&stream->perf->i915->drm,
2681 "wait for OA to be disabled timed out\n");
2685 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2686 * @stream: An i915 perf stream opened for OA metrics
2688 * Stops the OA unit from periodically writing counter reports into the
2689 * circular OA buffer. This also stops the hrtimer that periodically checks for
2690 * data in the circular OA buffer, for notifying userspace.
2692 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2694 stream->perf->ops.oa_disable(stream);
2696 if (stream->periodic)
2697 hrtimer_cancel(&stream->poll_check_timer);
2700 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2701 .destroy = i915_oa_stream_destroy,
2702 .enable = i915_oa_stream_enable,
2703 .disable = i915_oa_stream_disable,
2704 .wait_unlocked = i915_oa_wait_unlocked,
2705 .poll_wait = i915_oa_poll_wait,
2706 .read = i915_oa_read,
2709 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
2711 struct i915_request *rq;
2713 rq = stream->perf->ops.enable_metric_set(stream);
2717 i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
2718 i915_request_put(rq);
2724 * i915_oa_stream_init - validate combined props for OA stream and init
2725 * @stream: An i915 perf stream
2726 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2727 * @props: The property state that configures stream (individually validated)
2729 * While read_properties_unlocked() validates properties in isolation it
2730 * doesn't ensure that the combination necessarily makes sense.
2732 * At this point it has been determined that userspace wants a stream of
2733 * OA metrics, but still we need to further validate the combined
2734 * properties are OK.
2736 * If the configuration makes sense then we can allocate memory for
2737 * a circular OA buffer and apply the requested metric set configuration.
2739 * Returns: zero on success or a negative error code.
2741 static int i915_oa_stream_init(struct i915_perf_stream *stream,
2742 struct drm_i915_perf_open_param *param,
2743 struct perf_open_properties *props)
2745 struct drm_i915_private *i915 = stream->perf->i915;
2746 struct i915_perf *perf = stream->perf;
2750 if (!props->engine) {
2751 DRM_DEBUG("OA engine not specified\n");
2756 * If the sysfs metrics/ directory wasn't registered for some
2757 * reason then don't let userspace try their luck with config
2760 if (!perf->metrics_kobj) {
2761 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2765 if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
2766 (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
2767 DRM_DEBUG("Only OA report sampling supported\n");
2771 if (!perf->ops.enable_metric_set) {
2772 DRM_DEBUG("OA unit not supported\n");
2777 * To avoid the complexity of having to accurately filter
2778 * counter reports and marshal to the appropriate client,
2779 * we currently only allow exclusive access
2781 if (perf->exclusive_stream) {
2782 DRM_DEBUG("OA unit already in use\n");
2786 if (!props->oa_format) {
2787 DRM_DEBUG("OA report format not specified\n");
2791 stream->engine = props->engine;
2792 stream->uncore = stream->engine->gt->uncore;
2794 stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2796 format_size = perf->oa_formats[props->oa_format].size;
2798 stream->sample_flags = props->sample_flags;
2799 stream->sample_size += format_size;
2801 stream->oa_buffer.format_size = format_size;
2802 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
2805 stream->hold_preemption = props->hold_preemption;
2807 stream->oa_buffer.format =
2808 perf->oa_formats[props->oa_format].format;
2810 stream->periodic = props->oa_periodic;
2811 if (stream->periodic)
2812 stream->period_exponent = props->oa_period_exponent;
2815 ret = oa_get_render_ctx_id(stream);
2817 DRM_DEBUG("Invalid context id to filter with\n");
2822 ret = alloc_noa_wait(stream);
2824 DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
2825 goto err_noa_wait_alloc;
2828 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
2829 if (!stream->oa_config) {
2830 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
2835 /* PRM - observability performance counters:
2837 * OACONTROL, performance counter enable, note:
2839 * "When this bit is set, in order to have coherent counts,
2840 * RC6 power state and trunk clock gating must be disabled.
2841 * This can be achieved by programming MMIO registers as
2842 * 0xA094=0 and 0xA090[31]=1"
2844 * In our case we are expecting that taking pm + FORCEWAKE
2845 * references will effectively disable RC6.
2847 intel_engine_pm_get(stream->engine);
2848 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
2850 ret = alloc_oa_buffer(stream);
2852 goto err_oa_buf_alloc;
2854 stream->ops = &i915_oa_stream_ops;
2855 WRITE_ONCE(perf->exclusive_stream, stream);
2857 ret = i915_perf_stream_enable_sync(stream);
2859 DRM_DEBUG("Unable to enable metric set\n");
2863 DRM_DEBUG("opening stream oa config uuid=%s\n",
2864 stream->oa_config->uuid);
2866 hrtimer_init(&stream->poll_check_timer,
2867 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2868 stream->poll_check_timer.function = oa_poll_check_timer_cb;
2869 init_waitqueue_head(&stream->poll_wq);
2870 spin_lock_init(&stream->oa_buffer.ptr_lock);
2875 WRITE_ONCE(perf->exclusive_stream, NULL);
2876 perf->ops.disable_metric_set(stream);
2878 free_oa_buffer(stream);
2881 free_oa_configs(stream);
2883 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
2884 intel_engine_pm_put(stream->engine);
2887 free_noa_wait(stream);
2891 oa_put_render_ctx_id(stream);
2896 void i915_oa_init_reg_state(const struct intel_context *ce,
2897 const struct intel_engine_cs *engine)
2899 struct i915_perf_stream *stream;
2901 if (engine->class != RENDER_CLASS)
2904 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
2905 stream = READ_ONCE(engine->i915->perf.exclusive_stream);
2906 if (stream && INTEL_GEN(stream->perf->i915) < 12)
2907 gen8_update_reg_state_unlocked(ce, stream);
2911 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
2912 * @stream: An i915 perf stream
2913 * @file: An i915 perf stream file
2914 * @buf: destination buffer given by userspace
2915 * @count: the number of bytes userspace wants to read
2916 * @ppos: (inout) file seek position (unused)
2918 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
2919 * ensure that if we've successfully copied any data then reporting that takes
2920 * precedence over any internal error status, so the data isn't lost.
2922 * For example ret will be -ENOSPC whenever there is more buffered data than
2923 * can be copied to userspace, but that's only interesting if we weren't able
2924 * to copy some data because it implies the userspace buffer is too small to
2925 * receive a single record (and we never split records).
2927 * Another case with ret == -EFAULT is more of a grey area since it would seem
2928 * like bad form for userspace to ask us to overrun its buffer, but the user
2931 * http://yarchive.net/comp/linux/partial_reads_writes.html
2933 * Returns: The number of bytes copied or a negative error code on failure.
2935 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
2941 /* Note we keep the offset (aka bytes read) separate from any
2942 * error status, so that the final decision to return the bytes
2943 * read with a higher precedence than any error (see comment
2944 * below) doesn't need to be handled/duplicated in
2945 * stream->ops->read() implementations.
2948 int ret = stream->ops->read(stream, buf, count, &offset);
2950 return offset ?: (ret ?: -EAGAIN);
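/*
 * For reference, a minimal userspace consumer of this read() interface
 * might look like the following sketch (error handling elided;
 * process_oa_report() is a hypothetical callback, stream_fd an FD
 * returned by DRM_IOCTL_I915_PERF_OPEN):
 *
 *	uint8_t buf[16 * 4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (ssize_t offset = 0; offset < len; ) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report(header + 1);
 *
 *		offset += header->size;	// size includes the header
 *	}
 */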
2954 * i915_perf_read - handles read() FOP for i915 perf stream FDs
2955 * @file: An i915 perf stream file
2956 * @buf: destination buffer given by userspace
2957 * @count: the number of bytes userspace wants to read
2958 * @ppos: (inout) file seek position (unused)
2960 * The entry point for handling a read() on a stream file descriptor from
2961 * userspace. Most of the work is left to the i915_perf_read_locked() and
2962 * &i915_perf_stream_ops->read but to save having stream implementations (of
2963 * which we might have multiple later) we handle blocking read here.
2965 * We can also consistently treat trying to read from a disabled stream
2966 * as an IO error so implementations can assume the stream is enabled
2969 * Returns: The number of bytes copied or a negative error code on failure.
2971 static ssize_t i915_perf_read(struct file *file,
2976 struct i915_perf_stream *stream = file->private_data;
2977 struct i915_perf *perf = stream->perf;
2980 /* To ensure it's handled consistently we simply treat all reads of a
2981 * disabled stream as an error. In particular it might otherwise lead
2982 * to a deadlock for blocking file descriptors...
2984 if (!stream->enabled)
2987 if (!(file->f_flags & O_NONBLOCK)) {
2988 /* There's the small chance of false positives from
2989 * stream->ops->wait_unlocked.
2991 * E.g. with single context filtering, since we only wait until
2992 * the OA buffer has >= 1 report, we don't immediately know whether
2993 * any reports really belong to the current context
2996 ret = stream->ops->wait_unlocked(stream);
3000 mutex_lock(&perf->lock);
3001 ret = i915_perf_read_locked(stream, file,
3003 mutex_unlock(&perf->lock);
3004 } while (ret == -EAGAIN);
3006 mutex_lock(&perf->lock);
3007 ret = i915_perf_read_locked(stream, file, buf, count, ppos);
3008 mutex_unlock(&perf->lock);
3011 /* We allow the poll checking to sometimes report false positive EPOLLIN
3012 * events where we might actually report EAGAIN on read() if there's
3013 * not really any data available. In this situation though we don't
3014 * want to enter a busy loop between poll() reporting a EPOLLIN event
3015 * and read() returning -EAGAIN. Clearing the oa.pollin state here
3016 * effectively ensures we back off until the next hrtimer callback
3017 * before reporting another EPOLLIN event.
3019 if (ret >= 0 || ret == -EAGAIN) {
3020 /* Maybe make ->pollin per-stream state if we support multiple
3021 * concurrent streams in the future.
3023 stream->pollin = false;
3029 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3031 struct i915_perf_stream *stream =
3032 container_of(hrtimer, typeof(*stream), poll_check_timer);
3034 if (oa_buffer_check_unlocked(stream)) {
3035 stream->pollin = true;
3036 wake_up(&stream->poll_wq);
3039 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
3041 return HRTIMER_RESTART;
3045 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3046 * @stream: An i915 perf stream
3047 * @file: An i915 perf stream file
3048 * @wait: poll() state table
3050 * For handling userspace polling on an i915 perf stream, this calls through to
3051 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3052 * will be woken for new stream data.
3054 * Note: The &perf->lock mutex has been taken to serialize
3055 * with any non-file-operation driver hooks.
3057 * Returns: any poll events that are ready without sleeping
3059 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3063 __poll_t events = 0;
3065 stream->ops->poll_wait(stream, file, wait);
3067 /* Note: we don't explicitly check whether there's something to read
3068 * here since this path may be very hot depending on what else
3069 * userspace is polling, or on the timeout in use. We rely solely on
3070 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3080 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3081 * @file: An i915 perf stream file
3082 * @wait: poll() state table
3084 * For handling userspace polling on an i915 perf stream, this ensures
3085 * poll_wait() gets called with a wait queue that will be woken for new stream
3088 * Note: Implementation deferred to i915_perf_poll_locked()
3090 * Returns: any poll events that are ready without sleeping
3092 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3094 struct i915_perf_stream *stream = file->private_data;
3095 struct i915_perf *perf = stream->perf;
3098 mutex_lock(&perf->lock);
3099 ret = i915_perf_poll_locked(stream, file, wait);
3100 mutex_unlock(&perf->lock);
3106 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3107 * @stream: A disabled i915 perf stream
3109 * [Re]enables the associated capture of data for this stream.
3111 * If a stream was previously enabled then there's currently no intention
3112 * to provide userspace any guarantee about the preservation of previously
3115 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3117 if (stream->enabled)
3120 /* Allow stream->ops->enable() to refer to this */
3121 stream->enabled = true;
3123 if (stream->ops->enable)
3124 stream->ops->enable(stream);
3126 if (stream->hold_preemption)
3127 intel_context_set_nopreempt(stream->pinned_ctx);
3131 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3132 * @stream: An enabled i915 perf stream
3134 * Disables the associated capture of data for this stream.
3136 * The intention is that disabling and re-enabling a stream will ideally be
3137 * cheaper than destroying and re-opening a stream with the same configuration,
3138 * though there are no formal guarantees about what state or buffered data
3139 * must be retained between disabling and re-enabling a stream.
3141 * Note: while a stream is disabled it's considered an error for userspace
3142 * to attempt to read from the stream (-EIO).
3144 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3146 if (!stream->enabled)
3149 /* Allow stream->ops->disable() to refer to this */
3150 stream->enabled = false;
3152 if (stream->hold_preemption)
3153 intel_context_clear_nopreempt(stream->pinned_ctx);
3155 if (stream->ops->disable)
3156 stream->ops->disable(stream);
3159 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3160 unsigned long metrics_set)
3162 struct i915_oa_config *config;
3163 long ret = stream->oa_config->id;
3165 config = i915_perf_get_oa_config(stream->perf, metrics_set);
3169 if (config != stream->oa_config) {
3170 struct i915_request *rq;
3173 * If OA is bound to a specific context, emit the
3174 * reconfiguration inline from that context. The update
3175 * will then be ordered with respect to submission on that
3178 * When set globally, we use a low priority kernel context,
3179 * so it will effectively take effect when idle.
3181 rq = emit_oa_config(stream, config, oa_context(stream));
3183 config = xchg(&stream->oa_config, config);
3184 i915_request_put(rq);
3190 i915_oa_config_put(config);
3196 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3197 * @stream: An i915 perf stream
3198 * @cmd: the ioctl request
3199 * @arg: the ioctl data
3201 * Note: The &perf->lock mutex has been taken to serialize
3202 * with any non-file-operation driver hooks.
3204 * Returns: zero on success or a negative error code. Returns -EINVAL for
3205 * an unknown ioctl request.
3207 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3212 case I915_PERF_IOCTL_ENABLE:
3213 i915_perf_enable_locked(stream);
3215 case I915_PERF_IOCTL_DISABLE:
3216 i915_perf_disable_locked(stream);
3218 case I915_PERF_IOCTL_CONFIG:
3219 return i915_perf_config_locked(stream, arg);
3226 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3227 * @file: An i915 perf stream file
3228 * @cmd: the ioctl request
3229 * @arg: the ioctl data
3231 * Implementation deferred to i915_perf_ioctl_locked().
3233 * Returns: zero on success or a negative error code. Returns -EINVAL for
3234 * an unknown ioctl request.
3236 static long i915_perf_ioctl(struct file *file,
3240 struct i915_perf_stream *stream = file->private_data;
3241 struct i915_perf *perf = stream->perf;
3244 mutex_lock(&perf->lock);
3245 ret = i915_perf_ioctl_locked(stream, cmd, arg);
3246 mutex_unlock(&perf->lock);
3252 * i915_perf_destroy_locked - destroy an i915 perf stream
3253 * @stream: An i915 perf stream
3255 * Frees all resources associated with the given i915 perf @stream, disabling
3256 * any associated data capture in the process.
3258 * Note: The &perf->lock mutex has been taken to serialize
3259 * with any non-file-operation driver hooks.
3261 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3263 if (stream->enabled)
3264 i915_perf_disable_locked(stream);
3266 if (stream->ops->destroy)
3267 stream->ops->destroy(stream);
3270 i915_gem_context_put(stream->ctx);
3276 * i915_perf_release - handles userspace close() of a stream file
3277 * @inode: anonymous inode associated with file
3278 * @file: An i915 perf stream file
3280 * Cleans up any resources associated with an open i915 perf stream file.
3282 * NB: close() can't really fail from the userspace point of view.
3284 * Returns: zero on success or a negative error code.
3286 static int i915_perf_release(struct inode *inode, struct file *file)
3288 struct i915_perf_stream *stream = file->private_data;
3289 struct i915_perf *perf = stream->perf;
3291 mutex_lock(&perf->lock);
3292 i915_perf_destroy_locked(stream);
3293 mutex_unlock(&perf->lock);
3295 /* Release the reference the perf stream kept on the driver. */
3296 drm_dev_put(&perf->i915->drm);
3302 static const struct file_operations fops = {
3303 .owner = THIS_MODULE,
3304 .llseek = no_llseek,
3305 .release = i915_perf_release,
3306 .poll = i915_perf_poll,
3307 .read = i915_perf_read,
3308 .unlocked_ioctl = i915_perf_ioctl,
3309 /* Our ioctls have no arguments, so it's safe to use the same function
3310 * to handle 32-bit compatibility.
3312 .compat_ioctl = i915_perf_ioctl,
3317 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3318 * @perf: i915 perf instance
3319 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3320 * @props: individually validated u64 property value pairs
3323 * See i915_perf_ioctl_open() for interface details.
3325 * Implements further stream config validation and stream initialization on
3326 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3327 * taken to serialize with any non-file-operation driver hooks.
3329 * Note: at this point the @props have only been validated in isolation and
3330 * it's still necessary to validate that the combination of properties makes
3333 * In the case where userspace is interested in OA unit metrics then further
3334 * config validation and stream initialization details will be handled by
3335 * i915_oa_stream_init(). The code here should only validate config state that
3336 * will be relevant to all stream types / backends.
3338 * Returns: zero on success or a negative error code.
3341 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3342 struct drm_i915_perf_open_param *param,
3343 struct perf_open_properties *props,
3344 struct drm_file *file)
3346 struct i915_gem_context *specific_ctx = NULL;
3347 struct i915_perf_stream *stream = NULL;
3348 unsigned long f_flags = 0;
3349 bool privileged_op = true;
3353 if (props->single_context) {
3354 u32 ctx_handle = props->ctx_handle;
3355 struct drm_i915_file_private *file_priv = file->driver_priv;
3357 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3358 if (!specific_ctx) {
3359 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3367 * On Haswell the OA unit supports clock gating off for a specific
3368 * context and in this mode there's no visibility of metrics for the
3369 * rest of the system, which we consider acceptable for a
3370 * non-privileged client.
3372 * For Gen8->11 the OA unit no longer supports clock gating off for a
3373 * specific context and the kernel can't securely stop the counters
3374 * from updating as system-wide / global values. Even though we can
3375 * filter reports based on the included context ID we can't block
3376 * clients from seeing the raw / global counter values via
3377 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3378 * enable the OA unit by default.
3380 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3381 * per context basis. So we can relax requirements there if the user
3382 * doesn't request global stream access (i.e. query based sampling
3383 * using MI_REPORT_PERF_COUNT).
3385 if (IS_HASWELL(perf->i915) && specific_ctx)
3386 privileged_op = false;
3387 else if (IS_GEN(perf->i915, 12) && specific_ctx &&
3388 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3389 privileged_op = false;
3391 if (props->hold_preemption) {
3392 if (!props->single_context) {
3393 DRM_DEBUG("preemption disable with no context\n");
3397 privileged_op = true;
3400 /* Similar to perf's kernel.perf_event_paranoid sysctl option
3401 * we check a dev.i915.perf_stream_paranoid sysctl option
3402 * to determine if it's ok to access system wide OA counters
3403 * without CAP_SYS_ADMIN privileges.
3405 if (privileged_op &&
3406 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
3407 DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3412 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3418 stream->perf = perf;
3419 stream->ctx = specific_ctx;
3421 ret = i915_oa_stream_init(stream, param, props);
3425 /* we avoid simply assigning stream->sample_flags = props->sample_flags
3426 * to have _stream_init check the combination of sample flags more
3427 * thoroughly; still, this is the expected result at this point.
3429 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3434 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3435 f_flags |= O_CLOEXEC;
3436 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3437 f_flags |= O_NONBLOCK;
3439 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3440 if (stream_fd < 0) {
3445 if (!(param->flags & I915_PERF_FLAG_DISABLED))
3446 i915_perf_enable_locked(stream);
3448 /* Take a reference on the driver that will be kept with stream_fd
3449 * until its release.
3451 drm_dev_get(&perf->i915->drm);
3456 if (stream->ops->destroy)
3457 stream->ops->destroy(stream);
3462 i915_gem_context_put(specific_ctx);
3467 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3469 return div64_u64(1000000000ULL * (2ULL << exponent),
3470 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
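/*
 * E.g. on HSW, where the command streamer timestamp frequency is
 * 12.5MHz (cs_timestamp_frequency_khz == 12500), an exponent of 0
 * gives 1000000000 * 2 / 12500000 == 160ns, matching the minimum HSW
 * sampling period noted below; each increment of the exponent doubles
 * the period.
 */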
3474 * read_properties_unlocked - validate + copy userspace stream open properties
3475 * @perf: i915 perf instance
3476 * @uprops: The array of u64 key value pairs given by userspace
3477 * @n_props: The number of key value pairs expected in @uprops
3478 * @props: The stream configuration built up while validating properties
3480 * Note this function only validates properties in isolation; it doesn't
3481 * validate that the combination of properties makes sense or that all
3482 * properties necessary for a particular kind of stream have been set.
3484 * Note that there currently aren't any ordering requirements for properties so
3485 * we shouldn't validate or assume anything about ordering here. This doesn't
3486 * rule out defining new properties with ordering requirements in the future.
3488 static int read_properties_unlocked(struct i915_perf *perf,
3491 struct perf_open_properties *props)
3493 u64 __user *uprop = uprops;
3496 memset(props, 0, sizeof(struct perf_open_properties));
3499 DRM_DEBUG("No i915 perf properties given\n");
3503 /* At the moment we only support using i915-perf on the RCS. */
3504 props->engine = intel_engine_lookup_user(perf->i915,
3505 I915_ENGINE_CLASS_RENDER,
3507 if (!props->engine) {
3508 DRM_DEBUG("No RENDER-capable engines\n");
3512 /* Considering that ID = 0 is reserved and assuming that we don't
3513 * (currently) expect any configurations to ever specify duplicate
3514 * values for a particular property ID then the last _PROP_MAX value is
3515 * one greater than the maximum number of properties we expect to get
3518 if (n_props >= DRM_I915_PERF_PROP_MAX) {
3519 DRM_DEBUG("More i915 perf properties specified than exist\n");
3523 for (i = 0; i < n_props; i++) {
3524 u64 oa_period, oa_freq_hz;
3528 ret = get_user(id, uprop);
3532 ret = get_user(value, uprop + 1);
3536 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3537 DRM_DEBUG("Unknown i915 perf property ID\n");
3541 switch ((enum drm_i915_perf_property_id)id) {
3542 case DRM_I915_PERF_PROP_CTX_HANDLE:
3543 props->single_context = 1;
3544 props->ctx_handle = value;
3546 case DRM_I915_PERF_PROP_SAMPLE_OA:
3548 props->sample_flags |= SAMPLE_OA_REPORT;
3550 case DRM_I915_PERF_PROP_OA_METRICS_SET:
3552 DRM_DEBUG("Unknown OA metric set ID\n");
3555 props->metrics_set = value;
3557 case DRM_I915_PERF_PROP_OA_FORMAT:
3558 if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3559 DRM_DEBUG("Out-of-range OA report format %llu\n",
3563 if (!perf->oa_formats[value].size) {
3564 DRM_DEBUG("Unsupported OA report format %llu\n",
3568 props->oa_format = value;
3570 case DRM_I915_PERF_PROP_OA_EXPONENT:
3571 if (value > OA_EXPONENT_MAX) {
3572 DRM_DEBUG("OA timer exponent too high (> %u)\n",
3577 /* Theoretically we can program the OA unit to sample
3578 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3579 * for BXT. We don't allow such high sampling
3580 * frequencies by default unless root.
3583 BUILD_BUG_ON(sizeof(oa_period) != 8);
3584 oa_period = oa_exponent_to_ns(perf, value);
3586 /* This check is primarily to ensure that oa_period <=
3587 * UINT32_MAX (before passing to do_div which only
3588 * accepts a u32 denominator), but we can also skip
3589 * checking anything < 1Hz which implicitly can't be
3590 * limited via an integer oa_max_sample_rate.
3592 if (oa_period <= NSEC_PER_SEC) {
3593 u64 tmp = NSEC_PER_SEC;
3594 do_div(tmp, oa_period);
3599 if (oa_freq_hz > i915_oa_max_sample_rate &&
3600 !capable(CAP_SYS_ADMIN)) {
3601 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
3602 i915_oa_max_sample_rate);
3606 props->oa_periodic = true;
3607 props->oa_period_exponent = value;
3609 case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3610 props->hold_preemption = !!value;
3612 case DRM_I915_PERF_PROP_MAX:
3624 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3626 * @data: ioctl data copied from userspace (unvalidated)
3629 * Validates the stream open parameters given by userspace including flags
3630 * and an array of u64 key, value pair properties.
3632 * Very little is assumed up front about the nature of the stream being
3633 * opened (for instance we don't assume it's for periodic OA unit metrics). An
3634 * i915-perf stream is expected to be a suitable interface for other forms of
3635 * buffered data written by the GPU besides periodic OA metrics.
3637 * Note we copy the properties from userspace outside of the i915 perf
3638 * mutex to avoid an awkward lockdep with mmap_sem.
3640 * Most of the implementation details are handled by
3641 * i915_perf_open_ioctl_locked() after taking the &perf->lock
3642 * mutex for serializing with any non-file-operation driver hooks.
3644 * Return: A newly opened i915 Perf stream file descriptor or negative
3645 * error code on failure.
3647 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3648 struct drm_file *file)
3650 struct i915_perf *perf = &to_i915(dev)->perf;
3651 struct drm_i915_perf_open_param *param = data;
3652 struct perf_open_properties props;
3653 u32 known_open_flags;
3657 DRM_DEBUG("i915 perf interface not available for this system\n");
3661 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3662 I915_PERF_FLAG_FD_NONBLOCK |
3663 I915_PERF_FLAG_DISABLED;
3664 if (param->flags & ~known_open_flags) {
3665 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
3669 ret = read_properties_unlocked(perf,
3670 u64_to_user_ptr(param->properties_ptr),
3671 param->num_properties,
3676 mutex_lock(&perf->lock);
3677 ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3678 mutex_unlock(&perf->lock);
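/*
 * For illustration, a userspace open of a periodic OA stream might look
 * like the following sketch (drm_fd, the metrics set ID, the report
 * format and the exponent are all placeholder choices):
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT,
 *			I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / 16,	// (id, value) pairs
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */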
3684 * i915_perf_register - exposes i915-perf to userspace
3685 * @i915: i915 device instance
3687 * In particular OA metric sets are advertised under a sysfs metrics/
3688 * directory allowing userspace to enumerate valid IDs that can be
3689 * used to open an i915-perf stream.
3691 void i915_perf_register(struct drm_i915_private *i915)
3693 struct i915_perf *perf = &i915->perf;
3698 /* To be sure we're synchronized with an attempted
3699 * i915_perf_open_ioctl(), considering that we register after
3700 * being exposed to userspace.
3702 mutex_lock(&perf->lock);
3704 perf->metrics_kobj =
3705 kobject_create_and_add("metrics",
3706 &i915->drm.primary->kdev->kobj);
3708 mutex_unlock(&perf->lock);
3712 * i915_perf_unregister - hide i915-perf from userspace
3713 * @i915: i915 device instance
3715 * i915-perf state cleanup is split up into an 'unregister' and
3716 * 'deinit' phase where the interface is first hidden from
3717 * userspace by i915_perf_unregister() before cleaning up
3718 * remaining state in i915_perf_fini().
3720 void i915_perf_unregister(struct drm_i915_private *i915)
3722 struct i915_perf *perf = &i915->perf;
3724 if (!perf->metrics_kobj)
3727 kobject_put(perf->metrics_kobj);
3728 perf->metrics_kobj = NULL;
3731 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3733 static const i915_reg_t flex_eu_regs[] = {
3744 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3745 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3751 #define ADDR_IN_RANGE(addr, start, end) \
3752 ((addr) >= (start) && \
3753 (addr) <= (end))
3755 #define REG_IN_RANGE(addr, start, end) \
3756 ((addr) >= i915_mmio_reg_offset(start) && \
3757 (addr) <= i915_mmio_reg_offset(end))
3759 #define REG_EQUAL(addr, mmio) \
3760 ((addr) == i915_mmio_reg_offset(mmio))
static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
	       REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
	       REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
}

static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
	       REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
	       REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
	       REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
}

static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return gen7_is_valid_mux_addr(perf, addr) ||
	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
	       REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
}

static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return gen8_is_valid_mux_addr(perf, addr) ||
	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
	       REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
}

static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return gen7_is_valid_mux_addr(perf, addr) ||
	       ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
	       REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
	       REG_EQUAL(addr, HSW_MBVID2_MISR0);
}

static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return gen7_is_valid_mux_addr(perf, addr) ||
	       ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
}

static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
	       REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
	       REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
	       REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
	       REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
	       REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
	       REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
}

static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return REG_EQUAL(addr, NOA_WRITE) ||
	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
	       REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
	       REG_EQUAL(addr, RPM_CONFIG0) ||
	       REG_EQUAL(addr, RPM_CONFIG1) ||
	       REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
}
static u32 mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}
static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	if (!access_ok(regs, n_regs * sizeof(u32) * 2))
		return ERR_PTR(-EFAULT);

	/* No is_valid function means we're not allowing any register to be programmed. */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(perf, addr)) {
			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}
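
/*
 * Note: the user pointer handed to alloc_oa_regs() is a flat array of
 * (address, value) u32 pairs, e.g. (illustrative values only):
 *
 *	uint32_t regs[] = {
 *		0x9888, 0x14150000,	// hypothetical NOA mux write
 *		0x9888, 0x00028000,
 *	};
 *
 * with n_regs set to the number of pairs (2 here), which is what the
 * access_ok(regs, n_regs * sizeof(u32) * 2) check above accounts for.
 */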
static ssize_t show_dynamic_id(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(perf->metrics_kobj,
				  &oa_config->sysfs_metric);
}
/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open ioctl
 * or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	struct i915_oa_reg *regs;
	int err, id;

	if (!perf->i915) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!perf->metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		DRM_DEBUG("No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		DRM_DEBUG("Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	oa_config->perf = perf;
	kref_init(&oa_config->ref);

	if (!uuid_is_valid(args->uuid)) {
		DRM_DEBUG("Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config is
	 * kzalloc'd.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_mux_reg,
			     u64_to_user_ptr(args->mux_regs_ptr),
			     args->n_mux_regs);

	if (IS_ERR(regs)) {
		DRM_DEBUG("Failed to create OA config for mux_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->mux_regs = regs;

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_b_counter_reg,
			     u64_to_user_ptr(args->boolean_regs_ptr),
			     args->n_boolean_regs);

	if (IS_ERR(regs)) {
		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->b_counter_regs = regs;

	if (INTEL_GEN(perf->i915) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		regs = alloc_oa_regs(perf,
				     perf->ops.is_valid_flex_reg,
				     u64_to_user_ptr(args->flex_regs_ptr),
				     args->n_flex_regs);

		if (IS_ERR(regs)) {
			DRM_DEBUG("Failed to create OA config for flex_regs\n");
			err = PTR_ERR(regs);
			goto reg_err;
		}
		oa_config->flex_regs = regs;
	}

	err = mutex_lock_interruptible(&perf->metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
	 */
	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			DRM_DEBUG("OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
	if (err) {
		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 for kernel stored test config. */
	oa_config->id = idr_alloc(&perf->metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		DRM_DEBUG("Failed to allocate id for OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}

	mutex_unlock(&perf->metrics_lock);

	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);

	return oa_config->id;

sysfs_err:
	mutex_unlock(&perf->metrics_lock);
reg_err:
	i915_oa_config_put(oa_config);
	DRM_DEBUG("Failed to add new OA config\n");
	return err;
}
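
/*
 * Illustrative userspace sketch (not part of the driver): the uuid and
 * register values below are placeholders; real configs are typically
 * generated from published OA metric descriptions.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int add_oa_config(int drm_fd)
 *	{
 *		uint32_t mux_regs[] = { 0x9888, 0x14150000 };  // one pair
 *		struct drm_i915_perf_oa_config config = {
 *			.uuid = "01234567-0123-0123-0123-0123456789ab",
 *			.n_mux_regs = 1,
 *			.mux_regs_ptr = (uintptr_t)mux_regs,
 *		};
 *
 *		// returns the new config id (>= 2, see idr_alloc() above)
 *		// or a negative error code
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *	}
 */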
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!perf->i915) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&perf->metrics_lock);
	if (ret)
		return ret;

	oa_config = idr_find(&perf->metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto err_unlock;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);

	idr_remove(&perf->metrics_idr, *arg);

	mutex_unlock(&perf->metrics_lock);

	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

	i915_oa_config_put(oa_config);

	return 0;

err_unlock:
	mutex_unlock(&perf->metrics_lock);
	return ret;
}
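
/*
 * Illustrative userspace sketch (not part of the driver): removal takes a
 * pointer to the u64 config id returned by the add ioctl, e.g. with
 * add_oa_config() from the sketch above:
 *
 *	int ret = add_oa_config(drm_fd);
 *
 *	if (ret >= 0) {
 *		uint64_t config_id = ret;
 *
 *		ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id);
 *	}
 */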
static struct ctl_table oa_table[] = {
	{
	 .procname = "perf_stream_paranoid",
	 .data = &i915_perf_stream_paranoid,
	 .maxlen = sizeof(i915_perf_stream_paranoid),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = SYSCTL_ZERO,
	 .extra2 = SYSCTL_ONE,
	 },
	{
	 .procname = "oa_max_sample_rate",
	 .data = &i915_oa_max_sample_rate,
	 .maxlen = sizeof(i915_oa_max_sample_rate),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = SYSCTL_ZERO,
	 .extra2 = &oa_sample_rate_hard_limit,
	 },
	{}
};

static struct ctl_table i915_root[] = {
	{
	 .procname = "i915",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = oa_table,
	 },
	{}
};

static struct ctl_table dev_root[] = {
	{
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = i915_root,
	 },
	{}
};
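
/*
 * The tables above land in /proc/sys/dev/i915/: perf_stream_paranoid gates
 * access to system-wide metrics for unprivileged users, while
 * oa_max_sample_rate bounds the requested OA sampling frequency (itself
 * capped at oa_sample_rate_hard_limit). For example (requires root):
 *
 *	# echo 0 > /proc/sys/dev/i915/perf_stream_paranoid
 */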
/**
 * i915_perf_init - initialize i915-perf state on module bind
 * @i915: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with the i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	/* XXX const struct i915_perf_ops! */

	if (IS_HASWELL(i915)) {
		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
		perf->ops.is_valid_flex_reg = NULL;
		perf->ops.enable_metric_set = hsw_enable_metric_set;
		perf->ops.disable_metric_set = hsw_disable_metric_set;
		perf->ops.oa_enable = gen7_oa_enable;
		perf->ops.oa_disable = gen7_oa_disable;
		perf->ops.read = gen7_oa_read;
		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;

		perf->oa_formats = hsw_oa_formats;
	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
		/* Note: although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming, did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		perf->ops.read = gen8_oa_read;

		if (IS_GEN_RANGE(i915, 8, 9)) {
			perf->oa_formats = gen8_plus_oa_formats;

			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(i915)) {
				perf->ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen8_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

			if (IS_GEN(i915, 8)) {
				perf->ctx_oactxctrl_offset = 0x120;
				perf->ctx_flexeu0_offset = 0x2ce;

				perf->gen8_valid_ctx_bit = BIT(25);
			} else {
				perf->ctx_oactxctrl_offset = 0x128;
				perf->ctx_flexeu0_offset = 0x3de;

				perf->gen8_valid_ctx_bit = BIT(16);
			}
		} else if (IS_GEN_RANGE(i915, 10, 11)) {
			perf->oa_formats = gen8_plus_oa_formats;

			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen10_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen10_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

			if (IS_GEN(i915, 10)) {
				perf->ctx_oactxctrl_offset = 0x128;
				perf->ctx_flexeu0_offset = 0x3de;
			} else {
				perf->ctx_oactxctrl_offset = 0x124;
				perf->ctx_flexeu0_offset = 0x78e;
			}
			perf->gen8_valid_ctx_bit = BIT(16);
		} else if (IS_GEN(i915, 12)) {
			perf->oa_formats = gen12_oa_formats;

			perf->ops.is_valid_b_counter_reg =
				gen12_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen12_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen12_oa_enable;
			perf->ops.oa_disable = gen12_oa_disable;
			perf->ops.enable_metric_set = gen12_enable_metric_set;
			perf->ops.disable_metric_set = gen12_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;

			perf->ctx_flexeu0_offset = 0;
			perf->ctx_oactxctrl_offset = 0x144;
		}
	}

	if (perf->ops.enable_metric_set) {
		mutex_init(&perf->lock);

		oa_sample_rate_hard_limit = 1000 *
			(RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);

		mutex_init(&perf->metrics_lock);
		idr_init(&perf->metrics_idr);

		/* We set up some ratelimit state to potentially throttle any
		 * _NOTES about spurious, invalid OA reports which we don't
		 * forward to userspace.
		 *
		 * We print a _NOTE about any throttling when closing the
		 * stream instead of waiting until driver _fini which no one
		 * would ever see.
		 *
		 * Using the same limiting factors as printk_ratelimit()
		 */
		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
		/* Since we use a DRM_NOTE for spurious reports it would be
		 * inconsistent to let __ratelimit() automatically print a
		 * warning for throttling.
		 */
		ratelimit_set_flags(&perf->spurious_report_rs,
				    RATELIMIT_MSG_ON_RELEASE);

		atomic64_set(&perf->noa_programming_delay,
			     500 * 1000 /* 500us */);

		perf->i915 = i915;
	}
}
static int destroy_config(int id, void *p, void *data)
{
	i915_oa_config_put(p);
	return 0;
}

void i915_perf_sysctl_register(void)
{
	sysctl_header = register_sysctl_table(dev_root);
}

void i915_perf_sysctl_unregister(void)
{
	unregister_sysctl_table(sysctl_header);
}
/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @i915: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->i915)
		return;

	idr_for_each(&perf->metrics_idr, destroy_config, perf);
	idr_destroy(&perf->metrics_idr);

	memset(&perf->ops, 0, sizeof(perf->ops));
	perf->i915 = NULL;
}
/**
 * i915_perf_ioctl_version - Version of the i915-perf subsystem
 *
 * This version number is used by userspace to detect available features.
 */
int i915_perf_ioctl_version(void)
{
	/*
	 * 1: Initial version
	 *   I915_PERF_IOCTL_ENABLE
	 *   I915_PERF_IOCTL_DISABLE
	 *
	 * 2: Added runtime modification of OA config.
	 *   I915_PERF_IOCTL_CONFIG
	 *
	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
	 *    preemption on a particular context so that performance data is
	 *    accessible from a delta of MI_RPC reports without looking at the
	 *    OA buffer.
	 */
	return 3;
}
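
/*
 * Illustrative userspace sketch (not part of the driver): assuming this
 * version number is exposed through I915_PARAM_PERF_REVISION, userspace can
 * probe it with the GETPARAM ioctl before relying on newer features:
 *
 *	int revision = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &revision,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("i915-perf revision: %d\n", revision);
 */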
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_perf.c"
#endif