/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);

        I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
        GEM_BUG_ON(!guc->send_regs.base);
        GEM_BUG_ON(!guc->send_regs.count);
        GEM_BUG_ON(i >= guc->send_regs.count);

        return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        enum forcewake_domains fw_domains = 0;
        unsigned int i;

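        /*
         * Note: the last SOFT_SCRATCH register is reserved for GuC-to-host
         * MMIO messages (see intel_guc_to_host_event_handler_mmio() below),
         * hence only SOFT_SCRATCH_COUNT - 1 registers are used for sends.
         */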
        guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
        guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;

        for (i = 0; i < guc->send_regs.count; i++) {
                fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
                                        guc_send_reg(guc, i),
                                        FW_REG_READ | FW_REG_WRITE);
        }
        guc->send_regs.fw_domains = fw_domains;
}

void intel_guc_init_early(struct intel_guc *guc)
{
        intel_guc_fw_init_early(guc);
        intel_guc_ct_init_early(&guc->ct);
        intel_guc_log_init_early(&guc->log);

        mutex_init(&guc->send_mutex);
        spin_lock_init(&guc->irq_lock);
        guc->send = intel_guc_send_nop;
        guc->handler = intel_guc_to_host_event_handler_nop;
        guc->notify = gen8_guc_raise_irq;
}

static int guc_init_wq(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);

        /*
         * The GuC log buffer flush work item has to do register access to
         * send the ack to GuC, and this work item, if not synced before
         * suspend, can potentially get executed after the GFX device is
         * suspended.
         * By marking the WQ as freezable, we don't have to flush this work
         * item from the suspend hooks: any pending work item will either be
         * executed before the suspend or scheduled later on resume. This
         * way the handling of the work item is kept the same for system
         * suspend and runtime (rpm) suspend.
         */
        guc->log.relay.flush_wq =
                alloc_ordered_workqueue("i915-guc_log",
                                        WQ_HIGHPRI | WQ_FREEZABLE);
        if (!guc->log.relay.flush_wq) {
                DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
                return -ENOMEM;
        }

        /*
         * Even though both sending a GuC action and adding a new work item
         * to the GuC workqueue are serialized (each with its own locking),
         * since we're using multiple engines, it's possible that we will
         * issue a preempt request with two (or more - one per engine) work
         * items in the GuC queue. In this situation, GuC may submit all of
         * them, which will make us very confused.
         * Our preemption contexts may even already be complete - before we
         * even had the chance to send the preempt action to GuC! Rather
         * than introducing yet another lock, we can just use an ordered
         * workqueue to make sure we're always sending a single preemption
         * request with a single work item.
         */
        if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
            USES_GUC_SUBMISSION(dev_priv)) {
                guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
                                                          WQ_HIGHPRI);
                if (!guc->preempt_wq) {
                        destroy_workqueue(guc->log.relay.flush_wq);
                        DRM_ERROR("Couldn't allocate workqueue for GuC preemption\n");
                        return -ENOMEM;
                }
        }

        return 0;
}

static void guc_fini_wq(struct intel_guc *guc)
{
        struct workqueue_struct *wq;

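        /*
         * fetch_and_zero() clears each pointer before the corresponding
         * workqueue is destroyed, so this teardown is safe to run even if
         * only some of the workqueues were allocated.
         */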
        wq = fetch_and_zero(&guc->preempt_wq);
        if (wq)
                destroy_workqueue(wq);

        wq = fetch_and_zero(&guc->log.relay.flush_wq);
        if (wq)
                destroy_workqueue(wq);
}

int intel_guc_init_misc(struct intel_guc *guc)
{
        struct drm_i915_private *i915 = guc_to_i915(guc);
        int ret;

        ret = guc_init_wq(guc);
        if (ret)
                return ret;

        intel_uc_fw_fetch(i915, &guc->fw);

        return 0;
}

void intel_guc_fini_misc(struct intel_guc *guc)
{
        intel_uc_fw_fini(&guc->fw);
        guc_fini_wq(guc);
}

static int guc_shared_data_create(struct intel_guc *guc)
{
        struct i915_vma *vma;
        void *vaddr;

        vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                i915_vma_unpin_and_release(&vma, 0);
                return PTR_ERR(vaddr);
        }

        guc->shared_data = vma;
        guc->shared_data_vaddr = vaddr;

        return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
        i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}

int intel_guc_init(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        int ret;

        ret = guc_shared_data_create(guc);
        if (ret)
                goto err_fetch;
        GEM_BUG_ON(!guc->shared_data);

        ret = intel_guc_log_create(&guc->log);
        if (ret)
                goto err_shared;

        ret = intel_guc_ads_create(guc);
        if (ret)
                goto err_log;
        GEM_BUG_ON(!guc->ads_vma);

        /* We need to notify the guc whenever we change the GGTT */
        i915_ggtt_enable_guc(dev_priv);

        return 0;

err_log:
        intel_guc_log_destroy(&guc->log);
err_shared:
        guc_shared_data_destroy(guc);
err_fetch:
        intel_uc_fw_fini(&guc->fw);
        return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);

        i915_ggtt_disable_guc(dev_priv);
        intel_guc_ads_destroy(guc);
        intel_guc_log_destroy(&guc->log);
        guc_shared_data_destroy(guc);
        intel_uc_fw_fini(&guc->fw);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
        u32 level = intel_guc_log_get_level(&guc->log);
        u32 flags;
        u32 ads;

        ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
        flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;

        if (!GUC_LOG_LEVEL_IS_ENABLED(level))
                flags |= GUC_LOG_DEFAULT_DISABLED;

        if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
                flags |= GUC_LOG_DISABLED;
        else
                flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
                         GUC_LOG_VERBOSITY_SHIFT;

        return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        flags |= GUC_CTL_VCS2_ENABLED;

        if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
                flags |= GUC_CTL_KERNEL_SUBMISSIONS;
        else
                flags |= GUC_CTL_DISABLE_SCHEDULER;

        return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
                u32 ctxnum, base;

                base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
                ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

                base >>= PAGE_SHIFT;
                flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
                        (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
        }
        return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
        u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
        u32 flags;

        #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
        #define UNIT SZ_1M
        #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
        #else
        #define UNIT SZ_4K
        #define FLAG 0
        #endif

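        /*
         * Each log buffer size below is programmed as (size / UNIT - 1):
         * UNIT is 1M when every buffer size is 1M-aligned (with FLAG
         * telling the GuC that sizes are in megabytes), otherwise 4K.
         */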
        BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!DPC_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!ISR_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

        BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
        BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
        BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

        flags = GUC_LOG_VALID |
                GUC_LOG_NOTIFY_ON_HALF_FULL |
                FLAG |
                ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
                ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
                ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
                (offset << GUC_LOG_BUF_ADDR_SHIFT);

        #undef UNIT
        #undef FLAG

        return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_init_params(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 params[GUC_CTL_MAX_DWORDS];
        int i;

        memset(params, 0, sizeof(params));

        /*
         * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
         * second. This ARAT value is calculated by:
         * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
         */
        params[GUC_CTL_ARAT_HIGH] = 0;
        params[GUC_CTL_ARAT_LOW] = 100000000;

        params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

        params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
        params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
        params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
        params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);

        /*
         * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
         * they are power context saved so it's ok to release forcewake
         * when we are done here and take it again at xfer time.
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);

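        /*
         * SOFT_SCRATCH(0) is cleared first; the parameter block itself is
         * written to SOFT_SCRATCH(1)..SOFT_SCRATCH(GUC_CTL_MAX_DWORDS).
         */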
        I915_WRITE(SOFT_SCRATCH(0), 0);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
                       u32 *response_buf, u32 response_buf_size)
{
        WARN(1, "Unexpected send: action=%#x\n", *action);
        return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
        WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO-based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
                        u32 *response_buf, u32 response_buf_size)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 status;
        int i;
        int ret;

        GEM_BUG_ON(!len);
        GEM_BUG_ON(len > guc->send_regs.count);

        /* We expect only action code */
        GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

        /* If CT is available, we expect to use MMIO only during init/fini */
        GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
                *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
                *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

        mutex_lock(&guc->send_mutex);
        intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

        for (i = 0; i < len; i++)
                I915_WRITE(guc_send_reg(guc, i), action[i]);

        POSTING_READ(guc_send_reg(guc, i - 1));

        intel_guc_notify(guc);

        /*
         * No GuC command should ever take longer than 10ms.
         * Fast commands should still complete in 10us.
         */
        ret = __intel_wait_for_register_fw(dev_priv,
                                           guc_send_reg(guc, 0),
                                           INTEL_GUC_MSG_TYPE_MASK,
                                           INTEL_GUC_MSG_TYPE_RESPONSE <<
                                           INTEL_GUC_MSG_TYPE_SHIFT,
                                           10, 10, &status);
        /* If GuC explicitly returned an error, convert it to -EIO */
        if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
                ret = -EIO;

        if (ret) {
                DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
                          action[0], ret, status);
                goto out;
        }

        if (response_buf) {
                int count = min(response_buf_size, guc->send_regs.count - 1);

                for (i = 0; i < count; i++)
                        response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
        }

        /* Use data from the GuC response as our return value */
        ret = INTEL_GUC_MSG_TO_DATA(status);

out:
        intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
        mutex_unlock(&guc->send_mutex);

        return ret;
}
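
/*
 * Illustrative sketch only: a minimal two-dword action sent through the
 * generic intel_guc_send() wrapper, which dispatches either here or to the
 * CT path - intel_guc_auth_huc() below does exactly this:
 *
 *     u32 action[] = { INTEL_GUC_ACTION_AUTHENTICATE_HUC, rsa_offset };
 *
 *     err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */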

void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 msg, val;

        /*
         * Sample the log buffer flush related bits & clear them out
         * immediately from the message identity register to minimize the
         * probability of losing a flush interrupt when there are back to
         * back flush interrupts.
         * There can be a new flush interrupt, for a different log buffer
         * type (like for ISR), whilst the Host is handling one (for DPC).
         * Since the same bit is used in the message register for ISR & DPC,
         * it could happen that GuC sets the bit for the 2nd interrupt but
         * the Host clears out the bit on handling the 1st interrupt.
         */
        disable_rpm_wakeref_asserts(dev_priv);
        spin_lock(&guc->irq_lock);
        val = I915_READ(SOFT_SCRATCH(15));
        msg = val & guc->msg_enabled_mask;
        I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
        spin_unlock(&guc->irq_lock);
        enable_rpm_wakeref_asserts(dev_priv);

        intel_guc_to_host_process_recv_msg(guc, msg);
}

void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
{
        /* Make sure to handle only enabled messages */
        msg &= guc->msg_enabled_mask;

        if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
                   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
                intel_guc_log_handle_flush_event(&guc->log);
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 action[2];

        action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,cnl */
        if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                action[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
                action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t. ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:      non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
        u32 action[] = {
                INTEL_GUC_ACTION_AUTHENTICATE_HUC,
                rsa_offset
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:        the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
        u32 data[] = {
                INTEL_GUC_ACTION_ENTER_S_STATE,
                GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
                intel_guc_ggtt_offset(guc, guc->shared_data)
        };

        return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:        intel_guc structure
 * @engine:     engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
                           struct intel_engine_cs *engine)
{
        u32 data[7];

        GEM_BUG_ON(!guc->execbuf_client);

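        /* dwords 2-4 of the payload are not used by this request */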
        data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
        data[1] = engine->guc_id;
        data[2] = 0;
        data[3] = 0;
        data[4] = 0;
        data[5] = guc->execbuf_client->stage_id;
        data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

        return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:        the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
        u32 data[] = {
                INTEL_GUC_ACTION_EXIT_S_STATE,
                GUC_POWER_D0,
                intel_guc_ggtt_offset(guc, guc->shared_data)
        };

        return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC
 * WOPCM, while the upper part of GuC Address Space [ggtt_pin_bias,
 * GUC_GGTT_TOP) is mapped to DRAM. The value of the GuC ggtt_pin_bias is the
 * GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:        the guc
 * @size:       size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GuC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return:      An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u64 flags;
        int ret;

        obj = i915_gem_object_create(dev_priv, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto err;

        flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
        ret = i915_vma_pin(vma, 0, 0, flags);
        if (ret) {
                vma = ERR_PTR(ret);
                goto err;
        }

        return vma;

err:
        i915_gem_object_put(obj);
        return vma;
}
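
/*
 * Illustrative sketch, mirroring guc_shared_data_create() above: allocating
 * a single page for GuC use:
 *
 *     struct i915_vma *vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
 *
 *     if (IS_ERR(vma))
 *             return PTR_ERR(vma);
 */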

/**
 * intel_guc_reserved_gtt_size() - size of the GGTT region shadowed by GuC
 * @guc:        intel_guc structure
 *
 * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using
 * GuC we can't have any objects pinned in that region. This function returns
 * the size of the shadowed region.
 *
 * Returns:
 * 0 if GuC is not present or not in use.
 * Otherwise, the GuC WOPCM size.
 */
u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
{
        return guc_to_i915(guc)->wopcm.guc.size;
}