// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"

static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}

bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return true;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return true;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

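/*
 * Frequency (DCVS) requests take one of two routes below: legacy (HFI v1)
 * firmware is driven through the DCVS registers plus a GMU_OOB_DCVS_SET
 * handshake, while newer firmware takes the vote directly over HFI and only
 * needs the OPP/interconnect vote from the CPU side.
 */
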
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
		       bool suspended)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index;
	unsigned long gpu_freq;
	int ret = 0;

	gpu_freq = dev_pm_opp_get_freq(opp);

	if (gpu_freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (gpu_freq == gmu->gpu_freqs[perf_index])
			break;

	gmu->current_perf_index = perf_index;
	gmu->freq = gmu->gpu_freqs[perf_index];

	trace_msm_gmu_freq_change(gmu->freq, perf_index);

	/*
	 * This can get called from devfreq while the hardware is idle. Don't
	 * bring up the power if it isn't already active. All we're doing here
	 * is updating the frequency so that when we come back online we're at
	 * the right rate.
	 */
	if (suspended) {
		a6xx_hfi_set_freq(gmu, perf_index);
		return;
	}

	if (!gmu->legacy) {
		a6xx_hfi_set_freq(gmu, perf_index);
		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
		return;
	}

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
			((3 & 0xf) << 28) | perf_index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;
	u32 mask, reset_val;

	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
	if (val <= 0x20010004) {
		mask = 0xffffffff;
		reset_val = 0xbabeface;
	} else {
		mask = 0x1ff;
		reset_val = 0x100;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

	/* Set the log wptr index
	 * note: downstream saves the value in poweroff and restores it here
	 */
	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		(val & mask) == reset_val, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}

struct a6xx_gmu_oob_bits {
	int set, ack, set_new, ack_new, clear, clear_new;
	const char *name;
};

/* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob and a6xx_clear_oob
 */
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
	[GMU_OOB_GPU_SET] = {
		.name = "GPU_SET",
		.set = 16,
		.ack = 24,
		.set_new = 30,
		.ack_new = 31,
		.clear = 24,
		.clear_new = 31,
	},

	[GMU_OOB_PERFCOUNTER_SET] = {
		.name = "PERFCOUNTER",
		.set = 17,
		.ack = 25,
		.set_new = 28,
		.ack_new = 30,
		.clear = 25,
		.clear_new = 29,
	},

	[GMU_OOB_BOOT_SLUMBER] = {
		.name = "BOOT_SLUMBER",
		.set = 22,
		.ack = 30,
		.clear = 30,
	},

	[GMU_OOB_DCVS_SET] = {
		.name = "GPU_DCVS",
		.set = 23,
		.ack = 31,
		.clear = 31,
	},
};

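/*
 * Example handshake using the table above (bit values reconstructed from
 * the upstream table, so treat them as illustrative): for a legacy
 * GMU_OOB_GPU_SET request the host writes bit 16 of HOST2GMU_INTR_SET,
 * polls GMU2HOST_INTR_INFO for ack bit 24, then clears bit 24 through
 * GMU2HOST_INTR_CLR. a6xx_gmu_set_oob()/a6xx_gmu_clear_oob() below walk
 * exactly this sequence for each table entry.
 */
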
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;

	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return -EINVAL;

	if (gmu->legacy) {
		request = a6xx_gmu_oob_bits[state].set;
		ack = a6xx_gmu_oob_bits[state].ack;
	} else {
		request = a6xx_gmu_oob_bits[state].set_new;
		ack = a6xx_gmu_oob_bits[state].ack_new;
		if (!request || !ack) {
			DRM_DEV_ERROR(gmu->dev,
				      "Invalid non-legacy GMU request %s\n",
				      a6xx_gmu_oob_bits[state].name);
			return -EINVAL;
		}
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
				a6xx_gmu_oob_bits[state].name,
				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int bit;

	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return;

	if (gmu->legacy)
		bit = a6xx_gmu_oob_bits[state].clear;
	else
		bit = a6xx_gmu_oob_bits[state].clear_new;

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}

/* Enable CPU control of SPTP power collapse */
int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	if (!gmu->legacy)
		return 0;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}

/* Disable CPU control of SPTP power collapse */
void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	if (!gmu->legacy)
		return;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	if (!gmu->legacy) {
		ret = a6xx_hfi_send_prep_slumber(gmu);
		goto out;
	}

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

out:
	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

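/*
 * Program the RSC and PDC sequencers so the always-on power controller can
 * run the GPU sleep/wakeup sequences without CPU involvement. The magic
 * numbers below are sequencer microcode and TCS commands carried over from
 * the downstream driver; on parts where the PDC is owned by AOP firmware
 * (a650/a660 family) only the RSC side is programmed.
 */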
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = NULL;
	uint32_t pdc_address_offset;
	bool pdc_in_aop = false;

	if (IS_ERR(pdcptr))
		goto err;

	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
		pdc_in_aop = true;
	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
		pdc_address_offset = 0x30090;
	else if (adreno_is_a619(adreno_gpu))
		pdc_address_offset = 0x300a0;
	else
		pdc_address_offset = 0x30080;

	if (!pdc_in_aop) {
		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
		if (IS_ERR(seqptr))
			goto err;
	}

	/* Disable SDE clock gating */
	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	if (adreno_is_a650_family(adreno_gpu)) {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
	} else {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
	}

	if (pdc_in_aop)
		goto setup_pdc;

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
			adreno_is_a650_family(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
setup_pdc:
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

	a6xx_rpmh_stop(gmu);

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680
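/*
 * Worked out: the XO clock ticks at 19.2 MHz, so the main hysteresis of
 * 0x1680 (5760) cycles is 5760 / 19.2e6 = 300 us, and the short hysteresis
 * of 0xa (10) cycles is roughly 0.5 us, matching the description above.
 */
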
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		fallthrough;
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

struct block_header {
	u32 addr;
	u32 size;
	u32 type;
	u32 value;
	u32 data[];
};

/* this should be a general kernel helper */
static int in_range(u32 addr, u32 start, u32 size)
{
	return addr >= start && addr < start + size;
}

static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
	if (!in_range(blk->addr, bo->iova, bo->size))
		return false;

	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
	return true;
}

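/*
 * Non-legacy GMU firmware images are a stream of block_header packets: each
 * header carries a target address, payload size and type, with the payload
 * following in data[]. a6xx_gmu_fw_load() below walks the stream and copies
 * each block either into ITCM/DTCM through register writes or into the
 * preallocated icache/dcache/dummy BOs by address range.
 */
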
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
	const struct block_header *blk;
	u32 reg_offset;

	u32 itcm_base = 0x00000000;
	u32 dtcm_base = 0x00040000;

	if (adreno_is_a650_family(adreno_gpu))
		dtcm_base = 0x10004000;

	if (gmu->legacy) {
		/* Sanity check the size of the firmware that was loaded */
		if (fw_image->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
			       (u32*) fw_image->data, fw_image->size);
		return 0;
	}

	for (blk = (const struct block_header *) fw_image->data;
	     (const u8*) blk < fw_image->data + fw_image->size;
	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
		if (blk->size == 0)
			continue;

		if (in_range(blk->addr, itcm_base, SZ_16K)) {
			reg_offset = (blk->addr - itcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
				blk->data, blk->size);
		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
			reg_offset = (blk->addr - dtcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
				blk->data, blk->size);
		} else if (!fw_block_mem(&gmu->icache, blk) &&
			   !fw_block_mem(&gmu->dcache, blk) &&
			   !fw_block_mem(&gmu->dummy, blk)) {
			DRM_DEV_ERROR(gmu->dev,
				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
				blk->addr, blk->size, blk->data[0]);
		}
	}

	return 0;
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 chipid;
	int ret;

	if (adreno_is_a650_family(adreno_gpu)) {
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
	}

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;

		ret = a6xx_gmu_fw_load(gmu);
		if (ret)
			return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	/*
	 * Snapshots toggle the NMI bit which will result in a jump to the NMI
	 * handler instead of __main. Set the M3 config value to avoid that.
	 */
	gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);

	/*
	 * Note that the GMU has a slightly different layout for
	 * chip_id, for whatever reason, so a bit of massaging
	 * is needed. The upper 16b are the same, but minor and
	 * patchid are packed in four bits each with the lower
	 * 8b unused:
	 */
	chipid  = adreno_gpu->chip_id & 0xffff0000;
	chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
	chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
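	/*
	 * Worked example: chip_id 0x06030102 (a630, minor 1, patchid 2)
	 * becomes 0x06031200 - the upper 16 bits are kept while minor lands
	 * in bits 15:12 and patchid in bits 11:8.
	 */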

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
		  gmu->log.iova | (gmu->log.size / SZ_4K - 1));

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	if (gmu->legacy) {
		ret = a6xx_gmu_gfx_rail_on(gmu);
		if (ret)
			return ret;
	}

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

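/*
 * The GMU exposes two interrupt lines: the "gmu" line carries the always-on
 * domain errors (watchdog bite, AHB bus error, fence error) handled in
 * a6xx_gmu_irq(), while the "hfi" line carries the CM3 firmware fault
 * handled in a6xx_hfi_irq().
 */
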
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);
}

/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	/*
	 * Turn off keep alive that might have been enabled by the hang
	 * recovery
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);

	/* Clear the WRITEDROPPED fields and put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);

	/* Make sure the above writes go through */
	wmb();

	/* Halt the gmu cm3 core */
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

	a6xx_bus_clear_pending_transactions(adreno_gpu, true);

	/* Reset GPU core blocks */
	a6xx_gpu_sw_reset(gpu, true);
}

static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR(gpu_opp))
		return;

	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
	a6xx_gmu_set_freq(gpu, gpu_opp, false);
	dev_pm_opp_put(gpu_opp);
}

static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR(gpu_opp))
		return;

	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
	dev_pm_opp_put(gpu_opp);
}

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return -EINVAL;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get_sync(gmu->gxpd);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	clk_set_rate(gmu->hub_clk, 150000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	a6xx_gmu_set_initial_bw(gpu, gmu);

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	/*
	 * Warm boot path does not work on newer GPUs
	 * Presumably this is because icache/dcache regions must be restored
	 */
	if (!gmu->legacy)
		status = GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the current freq */
	a6xx_gmu_set_initial_freq(gpu, gmu);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);

		/* tell the GMU we want to slumber */
		ret = a6xx_gmu_notify_slumber(gmu);
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */

		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0%x/0%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);

	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
	msm_gem_address_space_put(gmu->aspace);
}

static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
		size_t size, u64 iova, const char *name)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct drm_device *dev = a6xx_gpu->base.base.dev;
	uint32_t flags = MSM_BO_WC;
	u64 range_start, range_end;
	int ret;

	size = PAGE_ALIGN(size);
	if (!iova) {
		/* no fixed address - use GMU's uncached range */
		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
		range_end = 0x80000000;
	} else {
		/* range for fixed address */
		range_start = iova;
		range_end = iova + size;
		/* use IOMMU_PRIV for icache/dcache */
		flags |= MSM_BO_MAP_PRIV;
	}

	bo->obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(bo->obj))
		return PTR_ERR(bo->obj);

	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
					     range_start, range_end);
	if (ret) {
		drm_gem_object_put(bo->obj);
		return ret;
	}

	bo->virt = msm_gem_get_vaddr(bo->obj);
	bo->size = size;

	msm_gem_object_set_name(bo->obj, name);

	return 0;
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(gmu->dev, 0);
	if (!mmu)
		return -ENODEV;
	if (IS_ERR(mmu))
		return PTR_ERR(mmu);

	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
	if (IS_ERR(gmu->aspace))
		return PTR_ERR(gmu->aspace);

	return 0;
}

/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
					   unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);
	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}

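/*
 * Worked example of the vote layout built above: if the primary (e.g.
 * gfx.lvl) table matches the arc level at index 2 and the closest secondary
 * (mx.lvl) match is index 1, the vote for that frequency is
 * (pri[2] << 16) | (1 << 8) | 2.
 */
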
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */

static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */

	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

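/*
 * The freq++ above is intentional: dev_pm_opp_find_freq_ceil() rounds the
 * passed value up to the exact OPP rate and writes it back, so storing the
 * rate and then bumping it by 1 Hz makes the next iteration find the next
 * higher OPP.
 */
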
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = devm_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "hub");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			      name, ret);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = to_platform_device(gmu->dev);

	mutex_lock(&gmu->lock);
	if (!gmu->initialized) {
		mutex_unlock(&gmu->lock);
		return;
	}

	gmu->initialized = false;

	mutex_unlock(&gmu->lock);

	pm_runtime_force_suspend(gmu->dev);

	/*
	 * Since cxpd is a virt device, the devlink with gmu-dev will be removed
	 * automatically when we do detach
	 */
	dev_pm_domain_detach(gmu->cxpd, false);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	gmu->mmio = NULL;
	gmu->rscc = NULL;

	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
		a6xx_gmu_memory_free(gmu);

		free_irq(gmu->gmu_irq, gmu);
		free_irq(gmu->hfi_irq, gmu);
	}

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);
}

static int cxpd_notifier_cb(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);

	if (action == GENPD_NOTIFY_OFF)
		complete_all(&gmu->pd_gate);

	return 0;
}

int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	pm_runtime_enable(gmu->dev);

	/* Mark legacy for manual SPTPRAC control */
	gmu->legacy = true;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_mmio;
	}

	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
	if (IS_ERR(gmu->cxpd)) {
		ret = PTR_ERR(gmu->cxpd);
		goto err_mmio;
	}

	if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
		ret = -ENODEV;
		goto detach_cxpd;
	}

	init_completion(&gmu->pd_gate);
	complete_all(&gmu->pd_gate);
	gmu->pd_nb.notifier_call = cxpd_notifier_cb;

	/* Get a link to the GX power domain to reset the GPU */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
	if (IS_ERR(gmu->gxpd)) {
		ret = PTR_ERR(gmu->gxpd);
		goto err_mmio;
	}

	gmu->initialized = true;

	return 0;

detach_cxpd:
	dev_pm_domain_detach(gmu->cxpd, false);

err_mmio:
	iounmap(gmu->mmio);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}

int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;


	/* A660 now requires handling "prealloc requests" in GMU firmware
	 * For now just hardcode allocations based on the known firmware.
	 * note: there is no indication that these correspond to "dummy" or
	 * "debug" regions, but this "guess" allows reusing these BOs which
	 * are otherwise unused by a660.
	 */
	gmu->dummy.size = SZ_4K;
	if (adreno_is_a660_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
					    0x60400000, "debug");
		if (ret)
			goto err_memory;

		gmu->dummy.size = SZ_8K;
	}

	/* Allocate memory for the GMU dummy page */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
				    0x60000000, "dummy");
	if (ret)
		goto err_memory;

	/* Note that a650 family also includes a660 family: */
	if (adreno_is_a650_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_16M - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;
	/*
	 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition
	 * to allocate icache/dcache here, as per downstream code flow, but it may not actually be
	 * necessary. If you omit this step and you don't get random pagefaults, you are likely
	 * good to go without this!
	 */
	} else if (adreno_is_a640_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_256K - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;

		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
			SZ_256K - SZ_16K, 0x44000, "dcache");
		if (ret)
			goto err_memory;
	} else if (adreno_is_a630_family(adreno_gpu)) {
		/* HFI v1, has sptprac */
		gmu->legacy = true;

		/* Allocate memory for the GMU debug region */
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
		if (ret)
			goto err_memory;
	}

	/* Allocate memory for the GMU log region */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
	if (ret)
		goto err_memory;

	/* Allocate memory for the HFI queues */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
	if (ret)
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_memory;
	}

	if (adreno_is_a650_family(adreno_gpu)) {
		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
		if (IS_ERR(gmu->rscc)) {
			ret = -ENODEV;
			goto err_mmio;
		}
	} else {
		gmu->rscc = gmu->mmio + 0x23000;
	}

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
		ret = -ENODEV;
		goto err_mmio;
	}

	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
	if (IS_ERR(gmu->cxpd)) {
		ret = PTR_ERR(gmu->cxpd);
		goto err_mmio;
	}

	if (!device_link_add(gmu->dev, gmu->cxpd,
			DL_FLAG_PM_RUNTIME)) {
		ret = -ENODEV;
		goto detach_cxpd;
	}

	init_completion(&gmu->pd_gate);
	complete_all(&gmu->pd_gate);
	gmu->pd_nb.notifier_call = cxpd_notifier_cb;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	/* Initialize RPMh */
	a6xx_gmu_rpmh_init(gmu);

	gmu->initialized = true;

	return 0;

detach_cxpd:
	dev_pm_domain_detach(gmu->cxpd, false);

err_mmio:
	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	ret = -ENODEV;

err_memory:
	a6xx_gmu_memory_free(gmu);
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}