* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
#include <errno.h>
#include <string.h>
uint32_t *version, uint32_t *feature)
{
struct drm_amdgpu_info request;
- struct drm_amdgpu_info_firmware firmware;
+ struct drm_amdgpu_info_firmware firmware = {};
int r;
memset(&request, 0, sizeof(request));
return 0;
}
-int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
+drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
{
int r, i;
dev->info.num_shader_engines = dev->dev_info.num_shader_engines;
dev->info.num_shader_arrays_per_engine =
dev->dev_info.num_shader_arrays_per_engine;
+ dev->info.vram_type = dev->dev_info.vram_type;
+ dev->info.vram_bit_width = dev->dev_info.vram_bit_width;
+ dev->info.ce_ram_size = dev->dev_info.ce_ram_size;
+ dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
+ dev->info.pci_rev_id = dev->dev_info.pci_rev;
+
+ if (dev->info.family_id < AMDGPU_FAMILY_AI) {
+ for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
+ unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
+ (AMDGPU_INFO_MMR_SH_INDEX_MASK <<
+ AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
+
+ r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
+ &dev->info.backend_disable[i]);
+ if (r)
+ return r;
+ /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
+ dev->info.backend_disable[i] =
+ (dev->info.backend_disable[i] >> 16) & 0xff;
+
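+ /* 0xa0d4 = PA_SC_RASTER_CONFIG */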
+ r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
+ &dev->info.pa_sc_raster_cfg[i]);
+ if (r)
+ return r;
+
+ if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
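+ /* 0xa0d5 = PA_SC_RASTER_CONFIG_1 (CIK and newer) */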
+ r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
+ &dev->info.pa_sc_raster_cfg1[i]);
+ if (r)
+ return r;
+ }
+ }
+ }
- for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
- unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
- (AMDGPU_INFO_MMR_SH_INDEX_MASK <<
- AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
+ r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
+ &dev->info.gb_addr_cfg);
+ if (r)
+ return r;
- r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
- &dev->info.backend_disable[i]);
+ if (dev->info.family_id < AMDGPU_FAMILY_AI) {
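+ /* 0x2644 = GB_TILE_MODE0; reads the 32 consecutive GB_TILE_MODEx registers */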
+ r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
+ dev->info.gb_tile_mode);
if (r)
return r;
- /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
- dev->info.backend_disable[i] =
- (dev->info.backend_disable[i] >> 16) & 0xff;
- r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
- &dev->info.pa_sc_raster_cfg[i]);
- if (r)
- return r;
+ if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
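+ /* 0x2664 = GB_MACROTILE_MODE0; reads the 16 consecutive GB_MACROTILE_MODEx registers */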
+ r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
+ dev->info.gb_macro_tile_mode);
+ if (r)
+ return r;
+ }
- r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
- &dev->info.pa_sc_raster_cfg1[i]);
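+ /* 0x9d8 = MC_ARB_RAMCFG */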
+ r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
+ &dev->info.mc_arb_ramcfg);
if (r)
return r;
}
- r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
- dev->info.gb_tile_mode);
- if (r)
- return r;
-
- r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
- dev->info.gb_macro_tile_mode);
- if (r)
- return r;
-
- r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
- &dev->info.gb_addr_cfg);
- if (r)
- return r;
-
- r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
- &dev->info.mc_arb_ramcfg);
- if (r)
- return r;
-
dev->info.cu_active_number = dev->dev_info.cu_active_number;
dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
memcpy(&dev->info.cu_bitmap[0][0], &dev->dev_info.cu_bitmap[0][0], sizeof(dev->info.cu_bitmap));
int amdgpu_query_gpu_info(amdgpu_device_handle dev,
struct amdgpu_gpu_info *info)
{
+ if ((dev == NULL) || (info == NULL))
+ return -EINVAL;
/* Get ASIC info */
*info = dev->info;
uint32_t flags,
struct amdgpu_heap_info *info)
{
- struct drm_amdgpu_info_vram_gtt vram_gtt_info;
+ struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
int r;
r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_GTT,
return 0;
}
+
+int amdgpu_query_gds_info(amdgpu_device_handle dev,
+ struct amdgpu_gds_resource_info *gds_info)
+{
+ struct drm_amdgpu_info_gds gds_config = {};
+ int r;
+
+ if (gds_info == NULL)
+ return -EINVAL;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
+ sizeof(gds_config), &gds_config);
+ if (r)
+ return r;
+
+ gds_info->gds_gfx_partition_size = gds_config.gds_gfx_partition_size;
+ gds_info->compute_partition_size = gds_config.compute_partition_size;
+ gds_info->gds_total_size = gds_config.gds_total_size;
+ gds_info->gws_per_gfx_partition = gds_config.gws_per_gfx_partition;
+ gds_info->gws_per_compute_partition = gds_config.gws_per_compute_partition;
+ gds_info->oa_per_gfx_partition = gds_config.oa_per_gfx_partition;
+ gds_info->oa_per_compute_partition = gds_config.oa_per_compute_partition;
+
+ return 0;
+}
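
Usage note (illustrative, not part of the patch): the sketch below shows one way a client could exercise the new amdgpu_query_gds_info() entry point, assuming an already-open DRM file descriptor for an amdgpu device. The helper name print_gds_info and the fd parameter are placeholders; everything else is the public libdrm_amdgpu API.

#include <stdint.h>
#include <stdio.h>
#include <amdgpu.h>     /* libdrm_amdgpu public API */

/* Illustrative helper: bring up the device, query GDS resources, print them. */
static int print_gds_info(int fd)
{
        amdgpu_device_handle dev;
        uint32_t major, minor;
        struct amdgpu_gds_resource_info gds;
        int r;

        r = amdgpu_device_initialize(fd, &major, &minor, &dev);
        if (r)
                return r;

        r = amdgpu_query_gds_info(dev, &gds);
        if (r == 0)
                printf("GDS total %u, gfx partition %u, compute partition %u\n",
                       gds.gds_total_size,
                       gds.gds_gfx_partition_size,
                       gds.compute_partition_size);

        amdgpu_device_deinitialize(dev);
        return r;
}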