/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
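In S0ix (suspend-to-idle) the GFX block stays power-gated with its state preserved, so touching GFXHUB registers is both pointless and unsafe; the MMHUB, which remains powered, is still programmed unconditionally. The duplicated comment spells out why skipping is correct: this interrupt only toggles at init/fini and suspend/resume, so its effective state is unchanged across an S0ix cycle. A minimal stand-alone sketch of the guard pattern, using hypothetical stand-ins for the amdgpu types:

#include <stdbool.h>
#include <stdio.h>

struct dev_ctx {
	bool in_s0ix;                     /* inside a suspend-to-idle cycle */
};

static void set_fault_mask(const char *hub, bool enable)
{
	printf("%s fault mask -> %s\n", hub, enable ? "on" : "off");
}

static void vm_fault_interrupt_state(struct dev_ctx *dev, bool enable)
{
	set_fault_mask("MMHUB", enable);  /* MM hub stays powered in S0ix */
	if (!dev->in_s0ix)                /* GFX hub is gated: skip it */
		set_fault_mask("GFXHUB", enable);
}

int main(void)
{
	struct dev_ctx dev = { .in_s0ix = true };
	vm_fault_interrupt_state(&dev, false);  /* GFXHUB write skipped */
	return 0;
}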
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
if (ring->is_mes_queue)
return;
- if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
+ if (ring->vm_hub == AMDGPU_GFXHUB_0)
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
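Two fragments are touched here: gmc_v10_0_emit_flush_gpu_tlb above, and, from the is_mes_queue check onward, the PASID-mapping emit helper, which is a void function, hence the bare return. In both, the hub index moves from the shared funcs table (ring->funcs->vmhub) to a per-ring field (ring->vm_hub): every ring of a given type points at one const amdgpu_ring_funcs instance, so a hub id stored there could never differ between two such rings. A sketch of the data-structure move, with illustrative names rather than the real amdgpu ones:

#include <stdint.h>

struct ring_funcs {                /* one const table shared by all rings
                                    * of a type: no per-ring data here */
	uint64_t (*emit_flush_gpu_tlb)(void *ring, unsigned vmid,
				       uint64_t pd_addr);
};

struct ring {
	const struct ring_funcs *funcs;
	uint32_t vm_hub;           /* per-ring: two rings sharing one
	                            * funcs table can target different hubs */
};

int main(void)
{
	struct ring r0 = { .vm_hub = 0 };  /* e.g. a GFX-hub ring */
	struct ring r1 = { .vm_hub = 1 };  /* e.g. an MM-hub ring */
	return r0.vm_hub == r1.vm_hub;     /* distinct despite shared funcs */
}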
adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
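+	/* number of physical pages retired per recorded bad address */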
+ adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v8_7_ras;
break;
default:
break;
}
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
-
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MMHUB_HWIP][0]) {
static int gmc_v10_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v10_0_set_mmhub_funcs(adev);
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
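The block deleted from the set_umc_funcs path above and the amdgpu_gmc_ras_early_init() call dropped from early_init are two halves of one cleanup: RAS block registration (name, block id, error type, default late_init/cb hooks) is now handled by common code reached via amdgpu_gmc_ras_sw_init(), called once from sw_init. A rough sketch of the shape of such a helper, with illustrative types rather than the real amdgpu ones:

#include <stdio.h>

struct ras_block {
	char name[16];
	int  (*late_init)(void *dev);
	int  (*cb)(void *dev, void *err);
};

static int default_late_init(void *dev) { (void)dev; return 0; }
static int default_cb(void *dev, void *err) { (void)dev; (void)err; return 0; }

/* one common registration path instead of per-IP copies */
static void ras_block_register(struct ras_block *blk, const char *name)
{
	snprintf(blk->name, sizeof(blk->name), "%s", name);
	if (!blk->late_init)          /* keep an IP-specific hook if set */
		blk->late_init = default_late_init;
	if (!blk->cb)
		blk->cb = default_cb;
}

int main(void)
{
	struct ras_block umc = { 0 };
	ras_block_register(&umc, "umc");
	return umc.late_init(NULL);
}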
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- r = adev->gfxhub.funcs->gart_enable(adev);
- if (r)
- return r;
+
+ if (!adev->in_s0ix) {
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r)
+ return r;
+ }
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
	return r;
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
- adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
- gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
+ if (!adev->in_s0ix)
+ gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
* harvestable groups in gc_utcl2 need to be programmed before any GFX block
* register setup within GMC, or else system hang when harvesting SA.
*/
- if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
+ if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
adev->gfxhub.funcs->utcl2_harvest(adev);
r = gmc_v10_0_gart_enable(adev);
*/
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs->gart_disable(adev);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
}
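The same S0ix rule threads through the whole GART path: on a suspend-to-idle resume only the MMHUB side is re-enabled, given its fault defaults, and TLB-flushed, while every GFXHUB step (utcl2 harvesting, gart_enable, fault defaults, flush, and the matching gart_disable) is skipped because GFX never lost its state. Condensed shape of the now-symmetric enable/disable pair, as a sketch with stand-in names:

#include <stdbool.h>

struct dev_ctx { bool in_s0ix; };

static int gfxhub_gart_enable(struct dev_ctx *d)   { (void)d; return 0; }
static int mmhub_gart_enable(struct dev_ctx *d)    { (void)d; return 0; }
static void gfxhub_gart_disable(struct dev_ctx *d) { (void)d; }
static void mmhub_gart_disable(struct dev_ctx *d)  { (void)d; }

static int gart_enable(struct dev_ctx *dev)
{
	int r;

	if (!dev->in_s0ix) {              /* skipped on an S0ix resume */
		r = gfxhub_gart_enable(dev);
		if (r)
			return r;
	}
	return mmhub_gart_enable(dev);    /* always reprogrammed */
}

static void gart_disable(struct dev_ctx *dev)
{
	if (!dev->in_s0ix)                /* mirror of the enable path */
		gfxhub_gart_disable(dev);
	mmhub_gart_disable(dev);
}

int main(void)
{
	struct dev_ctx dev = { .in_s0ix = true };
	gart_disable(&dev);
	return gart_enable(&dev);
}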
return 0;
}
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;
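Dropping the ecc_irq put in hw_fini is most likely the refcount fix seen across the gmc drivers: amdgpu_irq_get()/amdgpu_irq_put() are reference counted, and since hw_init never takes a reference on ecc_irq (the RAS code manages that interrupt), the unmatched put underflowed the count and tripped a WARN on suspend or driver unload. A minimal model of why an unpaired put is a bug:

#include <assert.h>

struct irq_src { int enable_refcount; };

static void irq_get(struct irq_src *s)
{
	s->enable_refcount++;             /* enable on the 0 -> 1 edge */
}

static int irq_put(struct irq_src *s)
{
	assert(s->enable_refcount > 0);   /* the kernel WARNs here */
	return --s->enable_refcount;      /* disable on the 1 -> 0 edge */
}

int main(void)
{
	struct irq_src vm_fault = { 0 }, ecc = { 0 };

	irq_get(&vm_fault);               /* taken in hw_init */
	irq_put(&vm_fault);               /* balanced in hw_fini: fine */
	(void)ecc;                        /* never got: putting it would
	                                   * trip the assert above */
	return 0;
}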