return false;
}
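+/* Report whether the base amdgpu driver found PCIe atomics support for
+ * this device (cached in adev->have_atomics_support).
+ */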
+bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->have_atomics_support;
+}
+
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
+bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
* 32 and 64-bit requests are possible and must be
* supported.
*/
- ret = pci_enable_atomic_ops_to_root(pdev,
- PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
- PCI_EXP_DEVCAP2_ATOMIC_COMP64);
- if (device_info->needs_pci_atomics && ret < 0) {
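+ /* PCIe atomics support is probed by the amdgpu base driver;
+ * query the cached result instead of calling the PCI core here.
+ */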
+ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
+ if (device_info->needs_pci_atomics &&
+ !kfd->pci_atomic_requested) {
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics\n",
pdev->vendor, pdev->device);
kfree(kfd);
return NULL;
- } else if (!ret)
- kfd->pci_atomic_requested = true;
+ }
kfd->kgd = kgd;
kfd->device_info = device_info;