// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */

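/*
 * For example (parameter semantics at the time of writing; see the
 * enable_guc description in i915_params.c for the authoritative list),
 * loading the driver with HuC loading enabled but GuC submission
 * disabled would be:
 *
 *   modprobe i915 enable_guc=2
 *
 * where bit 0 selects GuC submission and bit 1 selects HuC loading.
 */
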
void intel_guc_notify(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        /*
         * On Gen11+, the value written to the register is passed as a payload
         * to the FW. However, the FW currently treats all values the same way
         * (H2G interrupt), so we can just write the value that the HW expects
         * on older gens.
         */
        intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
        GEM_BUG_ON(!guc->send_regs.base);
        GEM_BUG_ON(!guc->send_regs.count);
        GEM_BUG_ON(i >= guc->send_regs.count);

        return _MMIO(guc->send_regs.base + 4 * i);
}

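/*
 * For example, with the Gen11 register bank selected below,
 * guc_send_reg(guc, 1) resolves to GEN11_SOFT_SCRATCH(1), i.e.
 * send_regs.base + 4 bytes.
 */
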
void intel_guc_init_send_regs(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        enum forcewake_domains fw_domains = 0;
        unsigned int i;

        if (INTEL_GEN(gt->i915) >= 11) {
                guc->send_regs.base =
                                i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
                guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
        } else {
                guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
                guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
                BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
        }

        for (i = 0; i < guc->send_regs.count; i++) {
                fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
                                        guc_send_reg(guc, i),
                                        FW_REG_READ | FW_REG_WRITE);
        }
        guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
        spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        if (!guc->interrupts.enabled) {
                WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
                             gt->pm_guc_events);
                guc->interrupts.enabled = true;
                gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
        }
        spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        guc->interrupts.enabled = false;

        gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

        spin_unlock_irq(&gt->irq_lock);
        intel_synchronize_irq(gt->i915);

        gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
        spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        if (!guc->interrupts.enabled) {
                u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

                WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
                intel_uncore_write(gt->uncore,
                                   GEN11_GUC_SG_INTR_ENABLE, events);
                intel_uncore_write(gt->uncore,
                                   GEN11_GUC_SG_INTR_MASK, ~events);
                guc->interrupts.enabled = true;
        }
        spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        guc->interrupts.enabled = false;

        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

        spin_unlock_irq(&gt->irq_lock);
        intel_synchronize_irq(gt->i915);

        gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
        struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

        intel_guc_fw_init_early(guc);
        intel_guc_ct_init_early(&guc->ct);
        intel_guc_log_init_early(&guc->log);
        intel_guc_submission_init_early(guc);

        mutex_init(&guc->send_mutex);
        spin_lock_init(&guc->irq_lock);
        if (INTEL_GEN(i915) >= 11) {
                guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
                guc->interrupts.reset = gen11_reset_guc_interrupts;
                guc->interrupts.enable = gen11_enable_guc_interrupts;
                guc->interrupts.disable = gen11_disable_guc_interrupts;
        } else {
                guc->notify_reg = GUC_SEND_INTERRUPT;
                guc->interrupts.reset = gen9_reset_guc_interrupts;
                guc->interrupts.enable = gen9_enable_guc_interrupts;
                guc->interrupts.disable = gen9_disable_guc_interrupts;
        }
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
        u32 level = intel_guc_log_get_level(&guc->log);
        u32 flags = 0;

        if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
                flags |= GUC_LOG_DISABLED;
        else
                flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
                         GUC_LOG_VERBOSITY_SHIFT;

        return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        if (!intel_guc_submission_is_used(guc))
                flags |= GUC_CTL_DISABLE_SCHEDULER;

        return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        if (intel_guc_submission_is_used(guc)) {
                u32 ctxnum, base;

                base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
                ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

                base >>= PAGE_SHIFT;
                flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
                        (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
        }
        return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
        u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
        u32 flags;

        #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
        #define UNIT SZ_1M
        #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
        #else
        #define UNIT SZ_4K
        #define FLAG 0
        #endif

        BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!DPC_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!ISR_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

        BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
        BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
        BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

        flags = GUC_LOG_VALID |
                GUC_LOG_NOTIFY_ON_HALF_FULL |
                FLAG |
                ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
                ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
                ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
                (offset << GUC_LOG_BUF_ADDR_SHIFT);

        #undef UNIT
        #undef FLAG

        return flags;
}

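/*
 * Worked example of the encoding above (the size is illustrative; the
 * real CRASH/DPC/ISR buffer sizes come from intel_guc_log.h): if
 * CRASH_BUFFER_SIZE were SZ_8K, it is not a multiple of SZ_1M, so
 * UNIT = SZ_4K and FLAG = 0, and the crash-buffer field would be
 * encoded as (SZ_8K / SZ_4K - 1) = 1 shifted by GUC_LOG_CRASH_SHIFT.
 */
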
static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
        u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
        u32 flags = ads << GUC_ADS_ADDR_SHIFT;

        return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
        u32 *params = guc->params;
        int i;

        BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

        params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
        params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
        params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
        params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
        params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block, initialised above, to the hardware
 * scratch registers before starting the firmware transfer. These
 * parameters are read by the firmware on startup and cannot be changed
 * thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        int i;

        /*
         * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
         * they are power context saved, so it's ok to release forcewake
         * when we are done here and take it again at xfer time.
         */
        intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

        intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

        intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

int intel_guc_init(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        int ret;

        ret = intel_uc_fw_init(&guc->fw);
        if (ret)
                goto out;

        ret = intel_guc_log_create(&guc->log);
        if (ret)
                goto err_fw;

        ret = intel_guc_ads_create(guc);
        if (ret)
                goto err_log;
        GEM_BUG_ON(!guc->ads_vma);

        ret = intel_guc_ct_init(&guc->ct);
        if (ret)
                goto err_ads;

        if (intel_guc_submission_is_used(guc)) {
                /*
                 * This is stuff we need to have available at fw load time
                 * if we are planning to enable submission later
                 */
                ret = intel_guc_submission_init(guc);
                if (ret)
                        goto err_ct;
        }

        /* now that everything is perma-pinned, initialize the parameters */
        guc_init_params(guc);

        /* We need to notify the guc whenever we change the GGTT */
        i915_ggtt_enable_guc(gt->ggtt);

        intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

        return 0;

err_ct:
        intel_guc_ct_fini(&guc->ct);
err_ads:
        intel_guc_ads_destroy(guc);
err_log:
        intel_guc_log_destroy(&guc->log);
err_fw:
        intel_uc_fw_fini(&guc->fw);
out:
        i915_probe_error(gt->i915, "failed with %d\n", ret);
        return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        if (!intel_uc_fw_is_loadable(&guc->fw))
                return;

        i915_ggtt_disable_guc(gt->ggtt);

        if (intel_guc_submission_is_used(guc))
                intel_guc_submission_fini(guc);

        intel_guc_ct_fini(&guc->ct);

        intel_guc_ads_destroy(guc);
        intel_guc_log_destroy(&guc->log);
        intel_uc_fw_fini(&guc->fw);
}

/*
 * This function implements the MMIO-based host-to-GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
                        u32 *response_buf, u32 response_buf_size)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        u32 status;
        int i;
        int ret;

        GEM_BUG_ON(!len);
        GEM_BUG_ON(len > guc->send_regs.count);

        /* We expect only action code */
        GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

        /* If CT is available, we expect to use MMIO only during init/fini */
        GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
                   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

        mutex_lock(&guc->send_mutex);
        intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

        for (i = 0; i < len; i++)
                intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

        intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

        intel_guc_notify(guc);

        /*
         * No GuC command should ever take longer than 10ms.
         * Fast commands should still complete in 10us.
         */
        ret = __intel_wait_for_register_fw(uncore,
                                           guc_send_reg(guc, 0),
                                           INTEL_GUC_MSG_TYPE_MASK,
                                           INTEL_GUC_MSG_TYPE_RESPONSE <<
                                           INTEL_GUC_MSG_TYPE_SHIFT,
                                           10, 10, &status);
        /* If GuC explicitly returned an error, convert it to -EIO */
        if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
                ret = -EIO;

        if (ret) {
                DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
                          action[0], ret, status);
                goto out;
        }

        if (response_buf) {
                int count = min(response_buf_size, guc->send_regs.count - 1);

                for (i = 0; i < count; i++)
                        response_buf[i] = intel_uncore_read(uncore,
                                                            guc_send_reg(guc, i + 1));
        }

        /* Use data from the GuC response as our return value */
        ret = INTEL_GUC_MSG_TO_DATA(status);

out:
        intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
        mutex_unlock(&guc->send_mutex);

        return ret;
}

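/*
 * A minimal (hypothetical) caller sketch for the interface above: an
 * action is a plain dword array with the action code in dword 0, and any
 * response payload is read back from the scratch registers. Note the
 * GEM_BUG_ON above: with CT available, only the CT register/deregister
 * actions are expected to come through this path.
 *
 *   u32 action[] = { INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
 *                    ... };
 *   u32 response[GUC_MAX_MMIO_MSG_LEN - 1];
 *   int err;
 *
 *   err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action),
 *                             response, ARRAY_SIZE(response));
 */
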
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
                                       const u32 *payload, u32 len)
{
        u32 msg;

        if (unlikely(!len))
                return -EPROTO;

        /* Make sure to handle only enabled messages */
        msg = payload[0] & guc->msg_enabled_mask;

        if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
                   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
                intel_guc_log_handle_flush_event(&guc->log);

        return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
        u32 action[2];

        action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,cnl */
        if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                action[1] = 0;
        else
                /* bits 0 and 1 are for the Render and Media domains, respectively */
                action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: offset of the HuC RSA signature w.r.t. the GGTT base of the HuC vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC action sent with intel_guc_send().
 * This function is invoked by intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
        u32 action[] = {
                INTEL_GUC_ACTION_AUTHENTICATE_HUC,
                rsa_offset
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        int ret;
        u32 status;
        u32 action[] = {
                INTEL_GUC_ACTION_ENTER_S_STATE,
                GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
        };

        /*
         * If GuC communication is enabled but submission is not supported,
         * we do not need to suspend the GuC.
         */
        if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
                return 0;

        /*
         * The ENTER_S_STATE action queues the save/restore operation in GuC FW
         * and then returns, so waiting on the H2G is not enough to guarantee
         * GuC is done. When all the processing is done, GuC writes
         * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
         * on that. Note that GuC does not ensure that the value in the register
         * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
         * in progress so we need to take care of that ourselves as well.
         */

        intel_uncore_write(uncore, SOFT_SCRATCH(14),
                           INTEL_GUC_SLEEP_STATE_INVALID_MASK);

        ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
        if (ret)
                return ret;

        ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
                                        INTEL_GUC_SLEEP_STATE_INVALID_MASK,
                                        0, 0, 10, &status);
        if (ret)
                return ret;

        if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
                DRM_ERROR("GuC failed to change sleep state. "
                          "action=0x%x, err=%u\n",
                          action[0], status);
                return -EIO;
        }

        return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
                           struct intel_engine_cs *engine)
{
        /* XXX: to be implemented with submission interface rework */

        return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_EXIT_S_STATE,
                GUC_POWER_D0,
        };

        /*
         * If GuC communication is enabled but submission is not supported,
         * we do not need to resume the GuC itself; GuC communication is
         * re-enabled by the intel_uc resume path before this is called.
         */
        if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
                return 0;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is
 * mapped to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

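/*
 * In practice this means any GGTT address handed to the firmware must
 * satisfy ggtt_pin_bias <= offset && offset + size <= GUC_GGTT_TOP; the
 * intel_guc_ggtt_offset() helper in intel_guc.h asserts exactly that when
 * translating a vma into an address for the GuC.
 */
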
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u64 flags;
        int ret;

        obj = i915_gem_object_create_shmem(gt->i915, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma))
                goto err;

        flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
        ret = i915_ggtt_pin(vma, 0, flags);
        if (ret) {
                vma = ERR_PTR(ret);
                goto err;
        }

        return i915_vma_make_unshrinkable(vma);

err:
        i915_gem_object_put(obj);
        return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
                                   struct i915_vma **out_vma, void **out_vaddr)
{
        struct i915_vma *vma;
        void *vaddr;

        vma = intel_guc_allocate_vma(guc, size);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                i915_vma_unpin_and_release(&vma, 0);
                return PTR_ERR(vaddr);
        }

        *out_vma = vma;
        *out_vaddr = vaddr;

        return 0;
}
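
/*
 * Typical use (an illustrative sketch, not a call site from this file):
 * allocating a page-sized, CPU-mapped buffer shared with the firmware:
 *
 *   struct i915_vma *vma;
 *   void *vaddr;
 *   int err;
 *
 *   err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &vma, &vaddr);
 *   if (err)
 *           return err;
 *   ...
 *   i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */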