* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
#ifndef _AMDGPU_INTERNAL_H_
#define _AMDGPU_INTERNAL_H_
#include <assert.h>
#include <pthread.h>
+
+#include "libdrm_macros.h"
#include "xf86atomic.h"
#include "amdgpu.h"
#include "util_double_list.h"
#define AMDGPU_CS_MAX_RINGS 8
-/* do not use below macro if b is not power of 2 aligned value */
+/* do not use the macros below if y is not a power-of-2 aligned value */
-#define ROUND_DOWN(a,b) ((a) & (~((b)-1)))
-#define ROUND_UP(a,b) (((a)+((b)-1)) & (~((b)-1)))
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))
+
+#define AMDGPU_INVALID_VA_ADDRESS 0xffffffffffffffff
+#define AMDGPU_NULL_SUBMIT_SEQ 0
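The mask-based macros only behave when y is a power of two, since (y - 1) must have all the low bits set. A minimal self-check, assuming the macros above (the test harness itself is hypothetical, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t va = 0x100123;

	assert(ROUND_DOWN(va, 0x1000) == 0x100000);        /* clear low bits */
	assert(ROUND_UP(va, 0x1000) == 0x101000);          /* next aligned address */
	assert(ROUND_UP(0x100000ULL, 0x1000) == 0x100000); /* aligned input unchanged */
	return 0;
}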
struct amdgpu_bo_va_hole {
struct list_head list;
uint64_t offset;
uint64_t size;
};
struct amdgpu_bo_va_mgr {
/* the start virtual address */
uint64_t va_offset;
+ uint64_t va_max;
struct list_head va_holes;
pthread_mutex_t bo_va_mutex;
uint32_t va_alignment;
};
+struct amdgpu_va {
+ amdgpu_device_handle dev;
+ uint64_t address;
+ uint64_t size;
+ enum amdgpu_gpu_va_range range;
+ struct amdgpu_bo_va_mgr *vamgr;
+};
+
+struct amdgpu_asic_id {
+ uint32_t did;
+ uint32_t rid;
+ char *marketing_name;
+};
+
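To make the table's intent concrete, a hypothetical lookup helper (asic_name_lookup is not part of the patch, and it assumes the array filled in by amdgpu_parse_asic_ids() ends with a sentinel entry whose marketing_name is NULL):

static const char *asic_name_lookup(const struct amdgpu_asic_id *ids,
                                    uint32_t did, uint32_t rid)
{
	const struct amdgpu_asic_id *id;

	/* Walk entries until the assumed NULL-name sentinel. */
	for (id = ids; id && id->marketing_name; id++) {
		if (id->did == did && id->rid == rid)
			return id->marketing_name;
	}
	return NULL;
}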
struct amdgpu_device {
atomic_t refcount;
int fd;
unsigned major_version;
unsigned minor_version;
+ /** Lookup table of asic device id, revision id and marketing name */
+ struct amdgpu_asic_id *asic_ids;
/** List of buffer handles. Protected by bo_table_mutex. */
struct util_hash_table *bo_handles;
/** List of buffer GEM flink names. Protected by bo_table_mutex. */
struct util_hash_table *bo_flink_names;
- /** List of buffer virtual memory ranges. Protected by bo_table_mutex. */
- struct util_hash_table *bo_vas;
/** This protects all hash tables. */
pthread_mutex_t bo_table_mutex;
- struct amdgpu_bo_va_mgr vamgr;
struct drm_amdgpu_info_device dev_info;
struct amdgpu_gpu_info info;
+ /** The global VA manager for the whole virtual address space */
+ struct amdgpu_bo_va_mgr vamgr;
+ /** The VA manager for the 32bit address space */
+ struct amdgpu_bo_va_mgr vamgr_32;
};
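A sketch of how callers are expected to pick between the two managers, assuming the AMDGPU_VA_RANGE_32_BIT flag from amdgpu.h; the helper itself is illustrative, not part of the patch:

static struct amdgpu_bo_va_mgr *
vamgr_for_flags(struct amdgpu_device *dev, uint64_t flags)
{
	/* 32bit requests go to the dedicated low-address manager. */
	return (flags & AMDGPU_VA_RANGE_32_BIT) ? &dev->vamgr_32 : &dev->vamgr;
}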
struct amdgpu_bo {
struct amdgpu_device *dev;
uint64_t alloc_size;
- uint64_t virtual_mc_base_address;
uint32_t handle;
uint32_t flink_name;
int cpu_map_count;
};
-/*
- * There are three mutexes.
- * To avoid deadlock, only hold the mutexes in this order:
- * sequence_mutex -> pendings_mutex -> pool_mutex.
-*/
+struct amdgpu_bo_list {
+ struct amdgpu_device *dev;
+
+ uint32_t handle;
+};
+
struct amdgpu_context {
struct amdgpu_device *dev;
/** Mutex for accessing fences and to maintain command submissions
- and pending lists in good sequence. */
+ in good sequence. */
pthread_mutex_t sequence_mutex;
- /** Buffer for user fences */
- struct amdgpu_ib *fence_ib;
- /** The newest expired fence for the ring of the ip blocks. */
- uint64_t expired_fences[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
- /** Mutex for accessing pendings list. */
- pthread_mutex_t pendings_mutex;
- /** Pending IBs. */
- struct list_head pendings[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
- /** Freed IBs not yet in pool */
- struct list_head freed;
- /** Mutex for accessing free ib pool. */
- pthread_mutex_t pool_mutex;
- /** Internal free IB pools. */
- struct list_head ib_pools[AMDGPU_CS_IB_SIZE_NUM];
/* context id */
uint32_t id;
+ uint64_t last_seq[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
+ struct list_head sem_list[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
};
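The three-dimensional arrays are indexed by IP block, instance and ring, and sequence_mutex is what serializes access to them. An illustrative helper (not in the patch) showing the intended locking discipline:

static void context_record_seq(struct amdgpu_context *ctx, unsigned ip_type,
                               unsigned ip_instance, unsigned ring,
                               uint64_t seq)
{
	pthread_mutex_lock(&ctx->sequence_mutex);
	ctx->last_seq[ip_type][ip_instance][ring] = seq;
	pthread_mutex_unlock(&ctx->sequence_mutex);
}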
-struct amdgpu_ib {
- amdgpu_context_handle context;
- struct list_head list_node;
- amdgpu_bo_handle buf_handle;
- void *cpu;
- uint64_t virtual_mc_base_address;
- enum amdgpu_cs_ib_size ib_size;
- uint64_t cs_handle;
+/**
+ * Structure describing a software semaphore, based on the GPU scheduler.
+ */
+struct amdgpu_semaphore {
+ atomic_t refcount;
+ struct list_head list;
+ struct amdgpu_cs_fence signal_fence;
};
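Given the refcount and list members, creation presumably looks roughly like the sketch below; semaphore_alloc is hypothetical, while atomic_set and list_inithead come from the xf86atomic.h and util_double_list.h headers already included above:

#include <stdlib.h>

static struct amdgpu_semaphore *semaphore_alloc(void)
{
	struct amdgpu_semaphore *sem = calloc(1, sizeof(*sem));

	if (!sem)
		return NULL;
	atomic_set(&sem->refcount, 1); /* caller holds the initial reference */
	list_inithead(&sem->list);     /* not yet queued on any sem_list */
	return sem;
}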
/**
* Functions.
*/
-void amdgpu_device_free_internal(amdgpu_device_handle dev);
-void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
-void amdgpu_vamgr_init(struct amdgpu_device *dev);
-uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
-                              uint64_t size, uint64_t alignment);
-void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
-                          uint64_t size);
-int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);
-uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);
+drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
+
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+                                   uint64_t max, uint64_t alignment);
+
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
+
+drm_private uint64_t
+amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+                     uint64_t alignment, uint64_t base_required);
+
+drm_private void
+amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size);
+
+drm_private int amdgpu_parse_asic_ids(struct amdgpu_asic_id **asic_ids);
+
+drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);
+
+drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);
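A hedged usage sketch of the reworked allocator interface; it assumes amdgpu_vamgr_find_va() returns AMDGPU_INVALID_VA_ADDRESS when no hole fits, and that base_required = 0 means any suitable address:

#include <errno.h>

static int alloc_and_release_va(struct amdgpu_device *dev,
                                uint64_t size, uint64_t alignment)
{
	uint64_t va;

	va = amdgpu_vamgr_find_va(&dev->vamgr, size, alignment, 0);
	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return -ENOMEM; /* assumed failure sentinel */

	/* ... map a buffer object at va and use it ... */

	amdgpu_vamgr_free_va(&dev->vamgr, va, size);
	return 0;
}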
/**
 * Inline functions.
 */
static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
                                       struct amdgpu_bo *src)
{
	if (update_references(&(*dst)->refcount, &src->refcount))
		amdgpu_bo_free_internal(*dst);
	*dst = src;
}
-/**
- * Assignment between two amdgpu_device pointers with reference counting.
- *
- * Usage:
- * struct amdgpu_device *dst = ... , *src = ...;
- *
- * dst = src;
- * // No reference counting. Only use this when you need to move
- * // a reference from one pointer to another.
- *
- * amdgpu_device_reference(&dst, src);
- * // Reference counters are updated. dst is decremented and src is
- * // incremented. dst is freed if its reference counter is 0.
- */
-void amdgpu_device_reference(struct amdgpu_device **dst,
- struct amdgpu_device *src);
#endif