path: root/amdgpu
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:22:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:22:22 +0000
commit     3f13df4d019cfcbef3f9909e3b993896d3c934e6 (patch)
tree       6c515810bee8549d81e68c548a26a63909f8e716 /amdgpu
parent     Initial commit. (diff)
download   libdrm-3f13df4d019cfcbef3f9909e3b993896d3c934e6.tar.xz
           libdrm-3f13df4d019cfcbef3f9909e3b993896d3c934e6.zip
Adding upstream version 2.4.114.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'amdgpu')
-rw-r--r--  amdgpu/.editorconfig          13
-rw-r--r--  amdgpu/Android.mk             19
-rw-r--r--  amdgpu/Makefile.sources       14
-rw-r--r--  amdgpu/amdgpu-symbols.txt     77
-rw-r--r--  amdgpu/amdgpu.h             1876
-rw-r--r--  amdgpu/amdgpu_asic_id.c      161
-rw-r--r--  amdgpu/amdgpu_bo.c           791
-rw-r--r--  amdgpu/amdgpu_cs.c           972
-rw-r--r--  amdgpu/amdgpu_device.c       316
-rw-r--r--  amdgpu/amdgpu_gpu_info.c     348
-rw-r--r--  amdgpu/amdgpu_internal.h     176
-rw-r--r--  amdgpu/amdgpu_vamgr.c        298
-rw-r--r--  amdgpu/amdgpu_vm.c            50
-rw-r--r--  amdgpu/handle_table.c         72
-rw-r--r--  amdgpu/handle_table.h         41
-rw-r--r--  amdgpu/libdrm_amdgpu.pc.in    11
-rw-r--r--  amdgpu/meson.build            70
17 files changed, 5305 insertions, 0 deletions
diff --git a/amdgpu/.editorconfig b/amdgpu/.editorconfig
new file mode 100644
index 0000000..426273f
--- /dev/null
+++ b/amdgpu/.editorconfig
@@ -0,0 +1,13 @@
+# To use this config with your editor, follow the instructions at:
+# http://editorconfig.org
+
+[*]
+charset = utf-8
+indent_style = tab
+indent_size = 8
+tab_width = 8
+insert_final_newline = true
+
+[meson.build]
+indent_style = space
+indent_size = 2
diff --git a/amdgpu/Android.mk b/amdgpu/Android.mk
new file mode 100644
index 0000000..1f028d0
--- /dev/null
+++ b/amdgpu/Android.mk
@@ -0,0 +1,19 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Import variables LIBDRM_AMDGPU_FILES, LIBDRM_AMDGPU_H_FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+LOCAL_MODULE := libdrm_amdgpu
+
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES := $(LIBDRM_AMDGPU_FILES)
+
+LOCAL_CFLAGS := \
+ -DAMDGPU_ASIC_ID_TABLE=\"/vendor/etc/hwdata/amdgpu.ids\"
+
+LOCAL_REQUIRED_MODULES := amdgpu.ids
+
+include $(LIBDRM_COMMON_MK)
+include $(BUILD_SHARED_LIBRARY)
diff --git a/amdgpu/Makefile.sources b/amdgpu/Makefile.sources
new file mode 100644
index 0000000..d6df324
--- /dev/null
+++ b/amdgpu/Makefile.sources
@@ -0,0 +1,14 @@
+LIBDRM_AMDGPU_FILES := \
+ amdgpu_asic_id.c \
+ amdgpu_bo.c \
+ amdgpu_cs.c \
+ amdgpu_device.c \
+ amdgpu_gpu_info.c \
+ amdgpu_internal.h \
+ amdgpu_vamgr.c \
+ amdgpu_vm.c \
+ handle_table.c \
+ handle_table.h
+
+LIBDRM_AMDGPU_H_FILES := \
+ amdgpu.h
diff --git a/amdgpu/amdgpu-symbols.txt b/amdgpu/amdgpu-symbols.txt
new file mode 100644
index 0000000..d41d9c2
--- /dev/null
+++ b/amdgpu/amdgpu-symbols.txt
@@ -0,0 +1,77 @@
+amdgpu_bo_alloc
+amdgpu_bo_cpu_map
+amdgpu_bo_cpu_unmap
+amdgpu_bo_export
+amdgpu_bo_free
+amdgpu_bo_import
+amdgpu_bo_inc_ref
+amdgpu_bo_list_create_raw
+amdgpu_bo_list_destroy_raw
+amdgpu_bo_list_create
+amdgpu_bo_list_destroy
+amdgpu_bo_list_update
+amdgpu_bo_query_info
+amdgpu_bo_set_metadata
+amdgpu_bo_va_op
+amdgpu_bo_va_op_raw
+amdgpu_bo_wait_for_idle
+amdgpu_create_bo_from_user_mem
+amdgpu_cs_chunk_fence_info_to_data
+amdgpu_cs_chunk_fence_to_dep
+amdgpu_cs_create_semaphore
+amdgpu_cs_create_syncobj
+amdgpu_cs_create_syncobj2
+amdgpu_cs_ctx_create
+amdgpu_cs_ctx_create2
+amdgpu_cs_ctx_free
+amdgpu_cs_ctx_override_priority
+amdgpu_cs_ctx_stable_pstate
+amdgpu_cs_destroy_semaphore
+amdgpu_cs_destroy_syncobj
+amdgpu_cs_export_syncobj
+amdgpu_cs_fence_to_handle
+amdgpu_cs_import_syncobj
+amdgpu_cs_query_fence_status
+amdgpu_cs_query_reset_state
+amdgpu_cs_query_reset_state2
+amdgpu_query_sw_info
+amdgpu_cs_signal_semaphore
+amdgpu_cs_submit
+amdgpu_cs_submit_raw
+amdgpu_cs_submit_raw2
+amdgpu_cs_syncobj_export_sync_file
+amdgpu_cs_syncobj_export_sync_file2
+amdgpu_cs_syncobj_import_sync_file
+amdgpu_cs_syncobj_import_sync_file2
+amdgpu_cs_syncobj_query
+amdgpu_cs_syncobj_query2
+amdgpu_cs_syncobj_reset
+amdgpu_cs_syncobj_signal
+amdgpu_cs_syncobj_timeline_signal
+amdgpu_cs_syncobj_timeline_wait
+amdgpu_cs_syncobj_transfer
+amdgpu_cs_syncobj_wait
+amdgpu_cs_wait_fences
+amdgpu_cs_wait_semaphore
+amdgpu_device_deinitialize
+amdgpu_device_get_fd
+amdgpu_device_initialize
+amdgpu_find_bo_by_cpu_mapping
+amdgpu_get_marketing_name
+amdgpu_query_buffer_size_alignment
+amdgpu_query_crtc_from_id
+amdgpu_query_firmware_version
+amdgpu_query_gds_info
+amdgpu_query_gpu_info
+amdgpu_query_heap_info
+amdgpu_query_hw_ip_count
+amdgpu_query_hw_ip_info
+amdgpu_query_info
+amdgpu_query_sensor_info
+amdgpu_query_video_caps_info
+amdgpu_read_mm_registers
+amdgpu_va_range_alloc
+amdgpu_va_range_free
+amdgpu_va_range_query
+amdgpu_vm_reserve_vmid
+amdgpu_vm_unreserve_vmid
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
new file mode 100644
index 0000000..5ef2524
--- /dev/null
+++ b/amdgpu/amdgpu.h
@@ -0,0 +1,1876 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * \file amdgpu.h
+ *
+ * Declare public libdrm_amdgpu API
+ *
+ * This file defines the API exposed by the libdrm_amdgpu library.
+ * Users who want to use libdrm_amdgpu functionality must include
+ * this file.
+ *
+ */
+#ifndef _AMDGPU_H_
+#define _AMDGPU_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct drm_amdgpu_info_hw_ip;
+struct drm_amdgpu_bo_list_entry;
+
+/*--------------------------------------------------------------------------*/
+/* --------------------------- Defines ------------------------------------ */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Define the max. number of Command Buffers (IBs) which can be sent to a
+ * single hardware IP in one submission, to accommodate CE/DE requirements
+ *
+ * \sa amdgpu_cs_ib_info
+*/
+#define AMDGPU_CS_MAX_IBS_PER_SUBMIT 4
+
+/**
+ * Special timeout value meaning that the timeout is infinite.
+ */
+#define AMDGPU_TIMEOUT_INFINITE 0xffffffffffffffffull
+
+/**
+ * Used in amdgpu_cs_query_fence_status(), meaning that the given timeout
+ * is absolute.
+ */
+#define AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE (1 << 0)
+
+/*--------------------------------------------------------------------------*/
+/* ----------------------------- Enums ------------------------------------ */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Enum describing possible handle types
+ *
+ * \sa amdgpu_bo_import, amdgpu_bo_export
+ *
+*/
+enum amdgpu_bo_handle_type {
+ /** GEM flink name (needs DRM authentication, used by DRI2) */
+ amdgpu_bo_handle_type_gem_flink_name = 0,
+
+ /** KMS handle which is used by all driver ioctls */
+ amdgpu_bo_handle_type_kms = 1,
+
+ /** DMA-buf fd handle */
+ amdgpu_bo_handle_type_dma_buf_fd = 2,
+
+	/** Deprecated: same behaviour as amdgpu_bo_handle_type_kms,
+	 * use that instead of this
+ */
+ amdgpu_bo_handle_type_kms_noimport = 3,
+};
+
+/** Define known types of GPU VM VA ranges */
+enum amdgpu_gpu_va_range
+{
+ /** Allocate from "normal"/general range */
+ amdgpu_gpu_va_range_general = 0
+};
+
+enum amdgpu_sw_info {
+ amdgpu_sw_info_address32_hi = 0,
+};
+
+/*--------------------------------------------------------------------------*/
+/* -------------------------- Datatypes ----------------------------------- */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Define opaque pointer to context associated with fd.
+ * This context will be returned as the result of the
+ * "initialize" function and should be passed as the first
+ * parameter to any API call
+ */
+typedef struct amdgpu_device *amdgpu_device_handle;
+
+/**
+ * Define GPU Context type as pointer to opaque structure
+ * Example of GPU Context is the "rendering" context associated
+ * with OpenGL context (glCreateContext)
+ */
+typedef struct amdgpu_context *amdgpu_context_handle;
+
+/**
+ * Define handle for amdgpu resources: buffer, GDS, etc.
+ */
+typedef struct amdgpu_bo *amdgpu_bo_handle;
+
+/**
+ * Define handle for list of BOs
+ */
+typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;
+
+/**
+ * Define handle to be used to work with VA allocated ranges
+ */
+typedef struct amdgpu_va *amdgpu_va_handle;
+
+/**
+ * Define handle for semaphore
+ */
+typedef struct amdgpu_semaphore *amdgpu_semaphore_handle;
+
+/*--------------------------------------------------------------------------*/
+/* -------------------------- Structures ---------------------------------- */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Structure describing memory allocation request
+ *
+ * \sa amdgpu_bo_alloc()
+ *
+*/
+struct amdgpu_bo_alloc_request {
+	/** Requested allocation size. It must be aligned correctly. */
+ uint64_t alloc_size;
+
+ /**
+ * It may be required to have some specific alignment requirements
+ * for physical back-up storage (e.g. for displayable surface).
+ * If 0 there is no special alignment requirement
+ */
+ uint64_t phys_alignment;
+
+ /**
+ * UMD should specify where to allocate memory and how it
+ * will be accessed by the CPU.
+ */
+ uint32_t preferred_heap;
+
+ /** Additional flags passed on allocation */
+ uint64_t flags;
+};
+
+/**
+ * Special UMD specific information associated with buffer.
+ *
+ * It may be necessary to pass some buffer characteristics as part
+ * of buffer sharing. Such information is defined by the UMD and is
+ * opaque to libdrm_amdgpu as well as to the kernel driver.
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info,
+ * amdgpu_bo_import(), amdgpu_bo_export
+ *
+*/
+struct amdgpu_bo_metadata {
+ /** Special flag associated with surface */
+ uint64_t flags;
+
+ /**
+ * ASIC-specific tiling information (also used by DCE).
+ * The encoding is defined by the AMDGPU_TILING_* definitions.
+ */
+ uint64_t tiling_info;
+
+ /** Size of metadata associated with the buffer, in bytes. */
+ uint32_t size_metadata;
+
+ /** UMD specific metadata. Opaque for kernel */
+ uint32_t umd_metadata[64];
+};
+
+/**
+ * Structure describing an allocated buffer. The client may need
+ * to query such information as part of the buffer-sharing mechanism
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
+ * amdgpu_bo_import(), amdgpu_bo_export()
+*/
+struct amdgpu_bo_info {
+ /** Allocated memory size */
+ uint64_t alloc_size;
+
+ /**
+ * It may be required to have some specific alignment requirements
+ * for physical back-up storage.
+ */
+ uint64_t phys_alignment;
+
+ /** Heap where to allocate memory. */
+ uint32_t preferred_heap;
+
+ /** Additional allocation flags. */
+ uint64_t alloc_flags;
+
+ /** Metadata associated with buffer if any. */
+ struct amdgpu_bo_metadata metadata;
+};
+
+/**
+ * Structure with information about "imported" buffer
+ *
+ * \sa amdgpu_bo_import()
+ *
+ */
+struct amdgpu_bo_import_result {
+ /** Handle of memory/buffer to use */
+ amdgpu_bo_handle buf_handle;
+
+ /** Buffer size */
+ uint64_t alloc_size;
+};
+
+/**
+ *
+ * Structure to describe GDS partitioning information.
+ * \note OA and GWS resources are associated with GDS partition
+ *
+ * \sa amdgpu_gpu_resource_query_gds_info
+ *
+*/
+struct amdgpu_gds_resource_info {
+ uint32_t gds_gfx_partition_size;
+ uint32_t compute_partition_size;
+ uint32_t gds_total_size;
+ uint32_t gws_per_gfx_partition;
+ uint32_t gws_per_compute_partition;
+ uint32_t oa_per_gfx_partition;
+ uint32_t oa_per_compute_partition;
+};
+
+/**
+ * Structure describing CS fence
+ *
+ * \sa amdgpu_cs_query_fence_status(), amdgpu_cs_request, amdgpu_cs_submit()
+ *
+*/
+struct amdgpu_cs_fence {
+
+ /** In which context IB was sent to execution */
+ amdgpu_context_handle context;
+
+ /** To which HW IP type the fence belongs */
+ uint32_t ip_type;
+
+ /** IP instance index if there are several IPs of the same type. */
+ uint32_t ip_instance;
+
+ /** Ring index of the HW IP */
+ uint32_t ring;
+
+ /** Specify fence for which we need to check submission status.*/
+ uint64_t fence;
+};
+
+/**
+ * Structure describing IB
+ *
+ * \sa amdgpu_cs_request, amdgpu_cs_submit()
+ *
+*/
+struct amdgpu_cs_ib_info {
+ /** Special flags */
+ uint64_t flags;
+
+ /** Virtual MC address of the command buffer */
+ uint64_t ib_mc_address;
+
+ /**
+ * Size of Command Buffer to be submitted.
+ * - The size is in units of dwords (4 bytes).
+ * - Could be 0
+ */
+ uint32_t size;
+};
+
+/**
+ * Structure describing fence information
+ *
+ * \sa amdgpu_cs_request, amdgpu_cs_query_fence,
+ * amdgpu_cs_submit(), amdgpu_cs_query_fence_status()
+*/
+struct amdgpu_cs_fence_info {
+ /** buffer object for the fence */
+ amdgpu_bo_handle handle;
+
+ /** fence offset in the unit of sizeof(uint64_t) */
+ uint64_t offset;
+};
+
+/**
+ * Structure describing submission request
+ *
+ * \note We could have several IBs as packet. e.g. CE, CE, DE case for gfx
+ *
+ * \sa amdgpu_cs_submit()
+*/
+struct amdgpu_cs_request {
+ /** Specify flags with additional information */
+ uint64_t flags;
+
+ /** Specify HW IP block type to which to send the IB. */
+ unsigned ip_type;
+
+ /** IP instance index if there are several IPs of the same type. */
+ unsigned ip_instance;
+
+ /**
+ * Specify ring index of the IP. We could have several rings
+ * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
+ */
+ uint32_t ring;
+
+ /**
+ * List handle with resources used by this request.
+ */
+ amdgpu_bo_list_handle resources;
+
+ /**
+ * Number of dependencies this Command submission needs to
+ * wait for before starting execution.
+ */
+ uint32_t number_of_dependencies;
+
+ /**
+ * Array of dependencies which need to be met before
+ * execution can start.
+ */
+ struct amdgpu_cs_fence *dependencies;
+
+ /** Number of IBs to submit in the field ibs. */
+ uint32_t number_of_ibs;
+
+ /**
+	 * IBs to submit. Those IBs will be submitted together as a single entity
+ */
+ struct amdgpu_cs_ib_info *ibs;
+
+ /**
+ * The returned sequence number for the command submission
+ */
+ uint64_t seq_no;
+
+ /**
+ * The fence information
+ */
+ struct amdgpu_cs_fence_info fence_info;
+};
+
+/**
+ * Structure which provides information about GPU VM MC address space
+ * alignment requirements
+ *
+ * \sa amdgpu_query_buffer_size_alignment
+ */
+struct amdgpu_buffer_size_alignments {
+ /** Size alignment requirement for allocation in
+ * local memory */
+ uint64_t size_local;
+
+ /**
+ * Size alignment requirement for allocation in remote memory
+ */
+ uint64_t size_remote;
+};
+
+/**
+ * Structure which provides information about a heap
+ *
+ * \sa amdgpu_query_heap_info()
+ *
+ */
+struct amdgpu_heap_info {
+ /** Theoretical max. available memory in the given heap */
+ uint64_t heap_size;
+
+ /**
+ * Number of bytes allocated in the heap. This includes all processes
+ * and private allocations in the kernel. It changes when new buffers
+ * are allocated, freed, and moved. It cannot be larger than
+ * heap_size.
+ */
+ uint64_t heap_usage;
+
+ /**
+ * Theoretical possible max. size of buffer which
+ * could be allocated in the given heap
+ */
+ uint64_t max_allocation;
+};
+
+/**
+ * Describes GPU h/w info needed for correct UMD initialization
+ *
+ * \sa amdgpu_query_gpu_info()
+*/
+struct amdgpu_gpu_info {
+ /** Asic id */
+ uint32_t asic_id;
+ /** Chip revision */
+ uint32_t chip_rev;
+ /** Chip external revision */
+ uint32_t chip_external_rev;
+ /** Family ID */
+ uint32_t family_id;
+ /** Special flags */
+ uint64_t ids_flags;
+ /** max engine clock*/
+ uint64_t max_engine_clk;
+ /** max memory clock */
+ uint64_t max_memory_clk;
+ /** number of shader engines */
+ uint32_t num_shader_engines;
+ /** number of shader arrays per engine */
+ uint32_t num_shader_arrays_per_engine;
+ /** Number of available good shader pipes */
+ uint32_t avail_quad_shader_pipes;
+	/** Max. number of shader pipes (including good and bad pipes) */
+ uint32_t max_quad_shader_pipes;
+ /** Number of parameter cache entries per shader quad pipe */
+ uint32_t cache_entries_per_quad_pipe;
+	/** Number of available graphics contexts */
+ uint32_t num_hw_gfx_contexts;
+ /** Number of render backend pipes */
+ uint32_t rb_pipes;
+ /** Enabled render backend pipe mask */
+ uint32_t enabled_rb_pipes_mask;
+ /** Frequency of GPU Counter */
+ uint32_t gpu_counter_freq;
+ /** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */
+ uint32_t backend_disable[4];
+ /** Value of MC_ARB_RAMCFG register*/
+ uint32_t mc_arb_ramcfg;
+ /** Value of GB_ADDR_CONFIG */
+ uint32_t gb_addr_cfg;
+ /** Values of the GB_TILE_MODE0..31 registers */
+ uint32_t gb_tile_mode[32];
+ /** Values of GB_MACROTILE_MODE0..15 registers */
+ uint32_t gb_macro_tile_mode[16];
+ /** Value of PA_SC_RASTER_CONFIG register per SE */
+ uint32_t pa_sc_raster_cfg[4];
+ /** Value of PA_SC_RASTER_CONFIG_1 register per SE */
+ uint32_t pa_sc_raster_cfg1[4];
+ /* CU info */
+ uint32_t cu_active_number;
+ uint32_t cu_ao_mask;
+ uint32_t cu_bitmap[4][4];
+ /* video memory type info*/
+ uint32_t vram_type;
+ /* video memory bit width*/
+ uint32_t vram_bit_width;
+ /** constant engine ram size*/
+ uint32_t ce_ram_size;
+ /* vce harvesting instance */
+ uint32_t vce_harvest_config;
+ /* PCI revision ID */
+ uint32_t pci_rev_id;
+};
+
+
+/*--------------------------------------------------------------------------*/
+/*------------------------- Functions --------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Initialization / Cleanup
+ *
+*/
+
+/**
+ *
+ * \param fd - \c [in] File descriptor for AMD GPU device
+ * received previously as the result of
+ * e.g. drmOpen() call.
+ * For legacy fd type, the DRI2/DRI3
+ * authentication should be done before
+ * calling this function.
+ * \param major_version - \c [out] Major version of library. It is assumed
+ * that adding new functionality will cause
+ * increase in major version
+ * \param minor_version - \c [out] Minor version of library
+ * \param device_handle - \c [out] Pointer to opaque context which should
+ * be passed as the first parameter on each
+ * API call
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ *
+ * \sa amdgpu_device_deinitialize()
+*/
+int amdgpu_device_initialize(int fd,
+ uint32_t *major_version,
+ uint32_t *minor_version,
+ amdgpu_device_handle *device_handle);
+
+/**
+ *
+ * When access to the library is no longer needed, this
+ * function must be called to give it an opportunity to clean up
+ * any resources if needed.
+ *
+ * \param device_handle - \c [in] Context associated with file
+ * descriptor for AMD GPU device
+ * received previously as the
+ * result e.g. of drmOpen() call.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_device_initialize()
+ *
+*/
+int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
+
+/**
+ *
+ * \param device_handle - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ *
+ * \return Returns the drm fd used for operations on this
+ * device. This is still owned by the library and hence
+ * should not be closed. Guaranteed to be valid until
+ * #amdgpu_device_deinitialize gets called.
+ *
+*/
+int amdgpu_device_get_fd(amdgpu_device_handle device_handle);
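
A minimal initialization sketch (an editorial illustration, not part of the upstream header); it assumes a render-node path such as /dev/dri/renderD128 and plain POSIX open(), with error handling reduced to the bare minimum:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include "amdgpu.h"

    static amdgpu_device_handle open_amdgpu_device(const char *path)
    {
            uint32_t major, minor;
            amdgpu_device_handle dev;
            /* Render nodes need no DRI2/DRI3 authentication. */
            int fd = open(path, O_RDWR | O_CLOEXEC);

            if (fd < 0)
                    return NULL;
            if (amdgpu_device_initialize(fd, &major, &minor, &dev)) {
                    close(fd);
                    return NULL;
            }
            printf("libdrm_amdgpu %u.%u, internal drm fd %d\n",
                   major, minor, amdgpu_device_get_fd(dev));
            return dev;     /* pair with amdgpu_device_deinitialize(dev) */
    }
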
+
+/*
+ * Memory Management
+ *
+*/
+
+/**
+ * Allocate memory to be used by UMD for GPU related operations
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param alloc_buffer - \c [in] Pointer to the structure describing an
+ * allocation request
+ * \param buf_handle - \c [out] Allocated buffer handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_free()
+*/
+int amdgpu_bo_alloc(amdgpu_device_handle dev,
+ struct amdgpu_bo_alloc_request *alloc_buffer,
+ amdgpu_bo_handle *buf_handle);
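
A hedged allocation sketch; the AMDGPU_GEM_DOMAIN_GTT heap selector and the 4 KiB alignment are assumptions taken from amdgpu_drm.h and typical usage, not from this header:

    #include <amdgpu_drm.h>     /* AMDGPU_GEM_DOMAIN_* (assumed available) */
    #include "amdgpu.h"

    static int alloc_64k_gtt(amdgpu_device_handle dev, amdgpu_bo_handle *bo)
    {
            struct amdgpu_bo_alloc_request req = {
                    .alloc_size = 64 * 1024,                 /* must be properly aligned */
                    .phys_alignment = 4096,
                    .preferred_heap = AMDGPU_GEM_DOMAIN_GTT, /* CPU-visible system memory */
                    .flags = 0,
            };

            return amdgpu_bo_alloc(dev, &req, bo);  /* release with amdgpu_bo_free(*bo) */
    }
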
+
+/**
+ * Associate opaque data with buffer to be queried by another UMD
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle
+ * \param info - \c [in] Metadata to associated with buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle,
+ struct amdgpu_bo_metadata *info);
+
+/**
+ * Query buffer information including metadata previously associated with
+ * buffer.
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle
+ * \param info - \c [out] Structure describing buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
+*/
+int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle,
+ struct amdgpu_bo_info *info);
+
+/**
+ * Allow others to get access to buffer
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle
+ * \param type - \c [in] Type of handle requested
+ * \param shared_handle - \c [out] Special "shared" handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_import()
+ *
+*/
+int amdgpu_bo_export(amdgpu_bo_handle buf_handle,
+ enum amdgpu_bo_handle_type type,
+ uint32_t *shared_handle);
+
+/**
+ * Request access to "shared" buffer
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param type - \c [in] Type of handle requested
+ * \param shared_handle - \c [in] Shared handle received as the result of the
+ *                                 "export" operation
+ * \param output - \c [out] Pointer to structure with information
+ * about imported buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note Buffer must be "imported" only using new "fd" (different from
+ * one used by "exporter").
+ *
+ * \sa amdgpu_bo_export()
+ *
+*/
+int amdgpu_bo_import(amdgpu_device_handle dev,
+ enum amdgpu_bo_handle_type type,
+ uint32_t shared_handle,
+ struct amdgpu_bo_import_result *output);
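
A sharing sketch using the dma-buf handle type declared above; the exporting and importing device handles are assumed to come from two independent amdgpu_device_initialize() calls (typically in different processes):

    #include "amdgpu.h"

    /* Exporter side: turn a BO into a dma-buf fd that can be passed around. */
    static int export_dmabuf(amdgpu_bo_handle bo, int *fd_out)
    {
            uint32_t shared;
            int r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &shared);

            if (!r)
                    *fd_out = (int)shared;
            return r;
    }

    /* Importer side: must use its own device handle, not the exporter's. */
    static int import_dmabuf(amdgpu_device_handle dev, int dmabuf_fd,
                             struct amdgpu_bo_import_result *res)
    {
            return amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
                                    (uint32_t)dmabuf_fd, res);
    }
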
+
+/**
+ * Request GPU access to user allocated memory e.g. via "malloc"
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param cpu - [in] CPU address of user allocated memory which we
+ * want to map to GPU address space (make GPU accessible)
+ * (This address must be correctly aligned).
+ * \param size - [in] Size of allocation (must be correctly aligned)
+ * \param buf_handle - [out] Buffer handle for the userptr memory
+ * resource, to be used on submission and in other operations.
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note
+ * This call doesn't guarantee that such memory will be persistently
+ * "locked" / made non-pageable. The purpose of this call is to provide
+ * an opportunity for the GPU to get access to this resource during submission.
+ *
+ * The maximum amount of memory which can be mapped in this call depends on
+ * whether overcommit is disabled or not. If overcommit is disabled, the max.
+ * amount of memory to be pinned is limited by the "free" size left in the
+ * total amount of memory which can be locked simultaneously ("GART" size).
+ *
+ * Supported (theoretical) max. size of mapping is restricted only by
+ * "GART" size.
+ *
+ * It is the responsibility of the caller to correctly specify access rights
+ * on VA assignment.
+*/
+int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
+ void *cpu, uint64_t size,
+ amdgpu_bo_handle *buf_handle);
+
+/**
+ * Validate if the user memory comes from BO
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param cpu - [in] CPU address of user allocated memory which we
+ * want to map to GPU address space (make GPU accessible)
+ * (This address must be correctly aligned).
+ * \param size - [in] Size of allocation (must be correctly aligned)
+ * \param buf_handle - [out] Buffer handle for the userptr memory
+ * if the user memory is not from BO, the buf_handle will be NULL.
+ * \param offset_in_bo - [out] offset in this BO for this user memory
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
+ void *cpu,
+ uint64_t size,
+ amdgpu_bo_handle *buf_handle,
+ uint64_t *offset_in_bo);
+
+/**
+ * Free previously allocated memory
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle to free
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note In the case of memory shared between different applications, all
+ *	 resources will be "physically" freed only when all such applications
+ *	 have terminated
+ * \note It is the UMD's responsibility to 'free' the buffer only when there
+ *	 is no more GPU access
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
+ *
+*/
+int amdgpu_bo_free(amdgpu_bo_handle buf_handle);
+
+/**
+ * Increase the reference count of a buffer object
+ *
+ * \param bo - \c [in] Buffer object handle to increase the reference count
+ *
+ * \sa amdgpu_bo_alloc(), amdgpu_bo_free()
+ *
+*/
+void amdgpu_bo_inc_ref(amdgpu_bo_handle bo);
+
+/**
+ * Request CPU access to GPU accessible memory
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ * \param cpu - \c [out] CPU address to be used for access
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_cpu_unmap()
+ *
+*/
+int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);
+
+/**
+ * Release CPU access to GPU memory
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_cpu_map()
+ *
+*/
+int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
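
A CPU-access sketch, assuming the BO was allocated in a CPU-visible heap (e.g. GTT, as in the earlier allocation sketch):

    #include <string.h>
    #include "amdgpu.h"

    static int clear_bo(amdgpu_bo_handle bo, uint64_t size)
    {
            void *cpu;
            int r = amdgpu_bo_cpu_map(bo, &cpu);

            if (r)
                    return r;
            memset(cpu, 0, size);   /* plain CPU writes through the mapping */
            return amdgpu_bo_cpu_unmap(bo);
    }
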
+
+/**
+ * Wait until a buffer is not used by the device.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle.
+ * \param timeout_ns - Timeout in nanoseconds.
+ * \param buffer_busy - 0 if the buffer is idle, all GPU access has completed
+ *				and no GPU access is scheduled.
+ *				1 if GPU access is in flight or scheduled
+ *
+ * \return 0 - on success
+ * <0 - Negative POSIX Error code
+ */
+int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
+ uint64_t timeout_ns,
+ bool *buffer_busy);
+
+/**
+ * Creates a BO list handle for command submission.
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param number_of_buffers - \c [in] Number of BOs in the list
+ * \param buffers - \c [in] List of BO handles
+ * \param result - \c [out] Created BO list handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_destroy_raw(), amdgpu_cs_submit_raw2()
+*/
+int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
+ uint32_t number_of_buffers,
+ struct drm_amdgpu_bo_list_entry *buffers,
+ uint32_t *result);
+
+/**
+ * Destroys a BO list handle.
+ *
+ * \param bo_list - \c [in] BO list handle.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
+*/
+int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);
+
+/**
+ * Creates a BO list handle for command submission.
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param number_of_resources - \c [in] Number of BOs in the list
+ * \param resources - \c [in] List of BO handles
+ * \param resource_prios - \c [in] Optional priority for each handle
+ * \param result - \c [out] Created BO list handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_destroy()
+*/
+int amdgpu_bo_list_create(amdgpu_device_handle dev,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios,
+ amdgpu_bo_list_handle *result);
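
A BO-list sketch; passing NULL for resource_prios relies on the "optional" wording above and is an assumption about default priorities:

    #include "amdgpu.h"

    static int make_list(amdgpu_device_handle dev, amdgpu_bo_handle a,
                         amdgpu_bo_handle b, amdgpu_bo_list_handle *list)
    {
            amdgpu_bo_handle bos[] = { a, b };

            /* Every BO touched by the command buffers must be in this list. */
            return amdgpu_bo_list_create(dev, 2, bos, NULL, list);
            /* free later with amdgpu_bo_list_destroy(*list) */
    }
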
+
+/**
+ * Destroys a BO list handle.
+ *
+ * \param handle - \c [in] BO list handle.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create()
+*/
+int amdgpu_bo_list_destroy(amdgpu_bo_list_handle handle);
+
+/**
+ * Update resources for existing BO list
+ *
+ * \param handle - \c [in] BO list handle
+ * \param number_of_resources - \c [in] Number of BOs in the list
+ * \param resources - \c [in] List of BO handles
+ * \param resource_prios - \c [in] Optional priority for each handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create()
+*/
+int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios);
+
+/*
+ * GPU Execution context
+ *
+*/
+
+/**
+ * Create GPU execution Context
+ *
+ * For the purpose of GPU Scheduler and GPU Robustness extensions it is
+ * necessary to have information/identify rendering/compute contexts.
+ * It also may be needed to associate some specific requirements with such
+ * contexts. Kernel driver will guarantee that submission from the same
+ * context will always be executed in order (first come, first serve).
+ *
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
+ * \param context - \c [out] GPU Context handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_free()
+ *
+*/
+int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
+ uint32_t priority,
+ amdgpu_context_handle *context);
+/**
+ * Create GPU execution Context
+ *
+ * Refer to amdgpu_cs_ctx_create2 for full documentation. This call
+ * is missing the priority parameter.
+ *
+ * \sa amdgpu_cs_ctx_create2()
+ *
+*/
+int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
+ amdgpu_context_handle *context);
+
+/**
+ *
+ * Destroy GPU execution context when not needed any more
+ *
+ * \param context - \c [in] GPU Context handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_create()
+ *
+*/
+int amdgpu_cs_ctx_free(amdgpu_context_handle context);
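
A context lifetime sketch; AMDGPU_CTX_PRIORITY_NORMAL is assumed to come from amdgpu_drm.h:

    #include <amdgpu_drm.h>     /* AMDGPU_CTX_PRIORITY_* (assumed) */
    #include "amdgpu.h"

    static int with_context(amdgpu_device_handle dev)
    {
            amdgpu_context_handle ctx;
            int r = amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, &ctx);
            /* amdgpu_cs_ctx_create(dev, &ctx) does the same without a priority */

            if (r)
                    return r;
            /* ... submit work on ctx ... */
            return amdgpu_cs_ctx_free(ctx);
    }
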
+
+/**
+ * Override the submission priority for the given context using a master fd.
+ *
+ * \param dev - \c [in] device handle
+ * \param context - \c [in] context handle for context id
+ * \param master_fd - \c [in] The master fd to authorize the override.
+ * \param priority - \c [in] The priority to assign to the context.
+ *
+ * \return 0 on success or a negative POSIX error code on failure.
+ */
+int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ int master_fd,
+ unsigned priority);
+
+/**
+ * Set or query the stable power state for GPU profiling.
+ *
+ * \param context - \c [in] GPU Context handle
+ * \param op - \c [in] AMDGPU_CTX_OP_{GET,SET}_STABLE_PSTATE
+ * \param flags - \c [in] AMDGPU_CTX_STABLE_PSTATE_*
+ * \param out_flags - \c [out] output current stable pstate
+ *
+ * \return 0 on success otherwise POSIX Error code.
+ */
+int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
+ uint32_t op,
+ uint32_t flags,
+ uint32_t *out_flags);
+
+/**
+ * Query reset state for the specific GPU Context
+ *
+ * \param context - \c [in] GPU Context handle
+ * \param state - \c [out] One of AMDGPU_CTX_*_RESET
+ * \param hangs - \c [out] Number of hangs caused by the context.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_create()
+ *
+*/
+int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
+ uint32_t *state, uint32_t *hangs);
+
+/**
+ * Query reset state for the specific GPU Context.
+ *
+ * \param context - \c [in] GPU Context handle
+ * \param flags - \c [out] A combination of AMDGPU_CTX_QUERY2_FLAGS_*
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_create()
+ *
+*/
+int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
+ uint64_t *flags);
+
+/*
+ * Command Buffers Management
+ *
+*/
+
+/**
+ * Send request to submit command buffers to hardware.
+ *
+ * The kernel driver could use the GPU Scheduler to decide when to physically
+ * send this request to the hardware. Accordingly, this request could be put
+ * in a queue and sent for execution later. The only guarantee is that requests
+ * from the same GPU context to the same ip:ip_instance:ring will be executed
+ * in order.
+ *
+ * The caller can specify the user fence buffer/location with the fence_info in
+ * the cs_request. The sequence number is returned via the 'seq_no' parameter
+ * in the ibs_request structure.
+ *
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param context - \c [in] GPU Context
+ * \param flags - \c [in] Global submission flags
+ * \param ibs_request - \c [in/out] Pointer to submission requests.
+ *                                      We could submit to several
+ *                                      engines/rings simultaneously as an
+ *                                      'atomic' operation
+ * \param number_of_requests - \c [in] Number of submission requests
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note It is required to pass a correct resource list with the buffer handles
+ *       which will be accessible by the command buffers from the submission.
+ *       This will allow the kernel driver to correctly implement "paging".
+ * Failure to do so will have unpredictable results.
+ *
+ * \sa amdgpu_command_buffer_alloc(), amdgpu_command_buffer_free(),
+ * amdgpu_cs_query_fence_status()
+ *
+*/
+int amdgpu_cs_submit(amdgpu_context_handle context,
+ uint64_t flags,
+ struct amdgpu_cs_request *ibs_request,
+ uint32_t number_of_requests);
+
+/**
+ * Query status of Command Buffer Submission
+ *
+ * \param fence - \c [in] Structure describing fence to query
+ * \param timeout_ns - \c [in] Timeout value to wait
+ * \param flags - \c [in] Flags for the query
+ * \param expired - \c [out] If fence expired or not.\n
+ * 0 – if fence is not expired\n
+ * !0 - otherwise
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note If the UMD only wants to check the operation status and return
+ *       immediately, a timeout value of 0 must be passed. In this case
+ *       success will be returned if the submission has completed, or a
+ *       timeout error code otherwise.
+ *
+ * \sa amdgpu_cs_submit()
+*/
+int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
+ uint64_t timeout_ns,
+ uint64_t flags,
+ uint32_t *expired);
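
A submission sketch tying amdgpu_cs_submit() to amdgpu_cs_query_fence_status(); the IB GPU address, its size in dwords and the BO list are assumed to have been prepared elsewhere, and AMDGPU_HW_IP_GFX is assumed to come from amdgpu_drm.h:

    #include <amdgpu_drm.h>     /* AMDGPU_HW_IP_* (assumed) */
    #include "amdgpu.h"

    static int submit_and_wait(amdgpu_context_handle ctx, amdgpu_bo_list_handle bos,
                               uint64_t ib_va, uint32_t ib_size_dw)
    {
            struct amdgpu_cs_ib_info ib = {
                    .ib_mc_address = ib_va,         /* VA of the filled command buffer */
                    .size = ib_size_dw,             /* in dwords */
            };
            struct amdgpu_cs_request req = {
                    .ip_type = AMDGPU_HW_IP_GFX,
                    .ring = 0,
                    .resources = bos,
                    .number_of_ibs = 1,
                    .ibs = &ib,
            };
            struct amdgpu_cs_fence fence;
            uint32_t expired = 0;
            int r = amdgpu_cs_submit(ctx, 0, &req, 1);

            if (r)
                    return r;
            fence.context = ctx;
            fence.ip_type = AMDGPU_HW_IP_GFX;
            fence.ip_instance = 0;
            fence.ring = 0;
            fence.fence = req.seq_no;               /* filled in by the submit */
            return amdgpu_cs_query_fence_status(&fence, AMDGPU_TIMEOUT_INFINITE,
                                                0, &expired);
    }
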
+
+/**
+ * Wait for multiple fences
+ *
+ * \param fences - \c [in] The fence array to wait
+ * \param fence_count - \c [in] The fence count
+ * \param wait_all - \c [in] If true, wait all fences to be signaled,
+ * otherwise, wait at least one fence
+ * \param timeout_ns - \c [in] The timeout to wait, in nanoseconds
+ * \param status - \c [out] '1' for signaled, '0' for timeout
+ * \param first - \c [out] the index of the first signaled fence from @fences
+ *
+ * \return 0 on success
+ * <0 - Negative POSIX Error code
+ *
+ * \note Currently it supports only one amdgpu_device. All fences come from
+ * the same amdgpu_device with the same fd.
+*/
+int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
+ uint32_t fence_count,
+ bool wait_all,
+ uint64_t timeout_ns,
+ uint32_t *status, uint32_t *first);
+
+/*
+ * Query / Info API
+ *
+*/
+
+/**
+ * Query allocation size alignments
+ *
+ * The UMD should query information about GPU VM MC size alignment requirements
+ * to be able to correctly choose the required allocation size and implement
+ * internal optimizations if needed.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info - \c [out] Pointer to structure to get size alignment
+ * requirements
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
+ struct amdgpu_buffer_size_alignments
+ *info);
+
+/**
+ * Query firmware versions
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param fw_type - \c [in] AMDGPU_INFO_FW_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type.
+ * \param index - \c [in] Index of the engine. (for SDMA and MEC)
+ * \param version - \c [out] Pointer to the "version" return value
+ * \param feature - \c [out] Pointer to the "feature" return value
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
+ unsigned ip_instance, unsigned index,
+ uint32_t *version, uint32_t *feature);
+
+/**
+ * Query the number of HW IP instances of a certain type.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param count - \c [out] Pointer to structure to get information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
+ uint32_t *count);
+
+/**
+ * Query engine information
+ *
+ * This query allows the UMD to query information about different engines and
+ * their capabilities.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type.
+ * \param info - \c [out] Pointer to structure to get information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
+ unsigned ip_instance,
+ struct drm_amdgpu_info_hw_ip *info);
+
+/**
+ * Query heap information
+ *
+ * This query allows UMD to query potentially available memory resources and
+ * adjust their logic if necessary.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param heap - \c [in] Heap type
+ * \param info - \c [out] Pointer to structure to get needed information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap,
+ uint32_t flags, struct amdgpu_heap_info *info);
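
A heap query sketch; using AMDGPU_GEM_DOMAIN_VRAM as the heap selector and 0 for flags are assumptions based on amdgpu_drm.h and common usage:

    #include <inttypes.h>
    #include <stdio.h>
    #include <amdgpu_drm.h>     /* AMDGPU_GEM_DOMAIN_VRAM (assumed) */
    #include "amdgpu.h"

    static void print_vram_usage(amdgpu_device_handle dev)
    {
            struct amdgpu_heap_info vram;

            if (amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram) == 0)
                    printf("VRAM: %" PRIu64 " of %" PRIu64 " bytes in use\n",
                           vram.heap_usage, vram.heap_size);
    }
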
+
+/**
+ * Get the CRTC ID from the mode object ID
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param id - \c [in] Mode object ID
+ * \param result - \c [in] Pointer to the CRTC ID
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
+ int32_t *result);
+
+/**
+ * Query GPU H/w Info
+ *
+ * Query hardware specific information
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info - \c [out] Pointer to structure to get needed information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_gpu_info(amdgpu_device_handle dev,
+ struct amdgpu_gpu_info *info);
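
A device-information sketch using only fields declared in struct amdgpu_gpu_info above; amdgpu_get_marketing_name() is declared further down in this header and may return NULL:

    #include <stdio.h>
    #include "amdgpu.h"

    static void print_gpu_info(amdgpu_device_handle dev)
    {
            struct amdgpu_gpu_info info;
            const char *name;

            if (amdgpu_query_gpu_info(dev, &info))
                    return;
            name = amdgpu_get_marketing_name(dev);
            printf("%s: asic_id 0x%x, family %u, VRAM bus %u bits\n",
                   name ? name : "unknown ASIC",
                   info.asic_id, info.family_id, info.vram_bit_width);
    }
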
+
+/**
+ * Query hardware or driver information.
+ *
+ * The return size is query-specific and depends on the "info_id" parameter.
+ * No more than "size" bytes is returned.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info_id - \c [in] AMDGPU_INFO_*
+ * \param size - \c [in] Size of the returned value.
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX error code
+ *
+*/
+int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
+ unsigned size, void *value);
+
+/**
+ * Query hardware or driver information.
+ *
+ * The return size is query-specific and depends on the "info_id" parameter.
+ * No more than "size" bytes is returned.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info - \c [in] amdgpu_sw_info_*
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX error code
+ *
+*/
+int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
+ void *value);
+
+/**
+ * Query information about GDS
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param gds_info - \c [out] Pointer to structure to get GDS information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_gds_info(amdgpu_device_handle dev,
+ struct amdgpu_gds_resource_info *gds_info);
+
+/**
+ * Query information about sensor.
+ *
+ * The return size is query-specific and depends on the "sensor_type"
+ * parameter. No more than "size" bytes is returned.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param sensor_type - \c [in] AMDGPU_INFO_SENSOR_*
+ * \param size - \c [in] Size of the returned value.
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
+ unsigned size, void *value);
+
+/**
+ * Query information about video capabilities
+ *
+ * The returned value is of size sizeof(struct drm_amdgpu_info_video_caps).
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param cap_type - \c [in] AMDGPU_INFO_VIDEO_CAPS_DECODE(ENCODE)
+ * \param size - \c [in] Size of the returned value.
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
+ unsigned size, void *value);
+
+/**
+ * Read a set of consecutive memory-mapped registers.
+ * Not all registers are allowed to be read by userspace.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param dword_offset - \c [in] Register offset in dwords
+ * \param count - \c [in] The number of registers to read starting
+ * from the offset
+ * \param instance - \c [in] GRBM_GFX_INDEX selector. It may have other
+ * uses. Set it to 0xffffffff if unsure.
+ * \param flags - \c [in] Flags with additional information.
+ * \param values - \c [out] The pointer to return values.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX error code
+ *
+*/
+int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
+ unsigned count, uint32_t instance, uint32_t flags,
+ uint32_t *values);
+
+/**
+ * Flag to request VA address range in the 32bit address space
+*/
+#define AMDGPU_VA_RANGE_32_BIT 0x1
+#define AMDGPU_VA_RANGE_HIGH 0x2
+#define AMDGPU_VA_RANGE_REPLAYABLE 0x4
+
+/**
+ * Allocate virtual address range
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param va_range_type - \c [in] Type of MC va range from which to allocate
+ * \param size - \c [in] Size of range. Size must be correctly aligned.
+ * It is the client's responsibility to correctly align the size based on the
+ * future usage of the allocated range.
+ * \param va_base_alignment - \c [in] Overwrite base address alignment
+ * requirement for GPU VM MC virtual
+ * address assignment. Must be multiple of size alignments received as
+ * 'amdgpu_buffer_size_alignments'.
+ * If 0 use the default one.
+ * \param va_base_required - \c [in] Specified required va base address.
+ * If 0 then the library chooses an available one.
+ * If a non-zero value is passed and that value is already "in use" then
+ * the corresponding error status will be returned.
+ * \param va_base_allocated - \c [out] On return: Allocated VA base to be used
+ * by client.
+ * \param va_range_handle - \c [out] On return: Handle assigned to allocation
+ * \param flags - \c [in] flags for special VA range
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note \n
+ * It is the client's responsibility to correctly handle VA assignments and
+ * usage. Neither the kernel driver nor libdrm_amdgpu is able to prevent or
+ * detect wrong VA assignment.
+ *
+ * It is the client's responsibility to correctly handle multi-GPU cases and to
+ * pass the corresponding arrays of all device handles where the corresponding
+ * VA will be used.
+ *
+*/
+int amdgpu_va_range_alloc(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range va_range_type,
+ uint64_t size,
+ uint64_t va_base_alignment,
+ uint64_t va_base_required,
+ uint64_t *va_base_allocated,
+ amdgpu_va_handle *va_range_handle,
+ uint64_t flags);
+
+/**
+ * Free previously allocated virtual address range
+ *
+ *
+ * \param va_range_handle - \c [in] Handle assigned to VA allocation
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_va_range_free(amdgpu_va_handle va_range_handle);
+
+/**
+ * Query virtual address range
+ *
+ * UMD can query the GPU VM range supported by each device
+ * to initialize its own VAM accordingly.
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param type - \c [in] Type of virtual address range
+ * \param start - \c [out] Start of the virtual address range
+ * \param end - \c [out] End of the virtual address range
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+
+int amdgpu_va_range_query(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range type,
+ uint64_t *start,
+ uint64_t *end);
+
+/**
+ * VA mapping/unmapping for the buffer object
+ *
+ * \param bo - \c [in] BO handle
+ * \param offset - \c [in] Start offset to map
+ * \param size - \c [in] Size to map
+ * \param addr - \c [in] Start virtual address.
+ * \param flags - \c [in] Supported flags for mapping/unmapping
+ * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops);
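
A VA-management sketch combining amdgpu_va_range_alloc() with amdgpu_bo_va_op(); the 4 KiB alignment and the AMDGPU_VA_OP_MAP/UNMAP values are assumptions based on amdgpu_drm.h and typical usage:

    #include <amdgpu_drm.h>     /* AMDGPU_VA_OP_* (assumed) */
    #include "amdgpu.h"

    static int map_bo(amdgpu_device_handle dev, amdgpu_bo_handle bo,
                      uint64_t size, uint64_t *va, amdgpu_va_handle *va_handle)
    {
            int r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general, size,
                                          4096 /* alignment */, 0 /* any base */,
                                          va, va_handle, 0);
            if (r)
                    return r;
            /* Map the whole BO at the allocated VA; flags 0 uses the defaults. */
            r = amdgpu_bo_va_op(bo, 0, size, *va, 0, AMDGPU_VA_OP_MAP);
            if (r)
                    amdgpu_va_range_free(*va_handle);
            return r;   /* unmap with AMDGPU_VA_OP_UNMAP, then amdgpu_va_range_free() */
    }
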
+
+/**
+ * VA mapping/unmapping for a buffer object or PRT region.
+ *
+ * This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all
+ * parameters are treated "raw", i.e. size is not automatically aligned, and
+ * all flags must be specified explicitly.
+ *
+ * \param dev - \c [in] device handle
+ * \param bo - \c [in] BO handle (may be NULL)
+ * \param offset - \c [in] Start offset to map
+ * \param size - \c [in] Size to map
+ * \param addr - \c [in] Start virtual address.
+ * \param flags - \c [in] Supported flags for mapping/unmapping
+ * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+
+int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
+ amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops);
+
+/**
+ * create semaphore
+ *
+ * \param sem - \c [out] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem);
+
+/**
+ * signal semaphore
+ *
+ * \param context - \c [in] GPU Context
+ * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type
+ * \param ring - \c [in] Specify ring index of the IP
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem);
+
+/**
+ * wait semaphore
+ *
+ * \param context - \c [in] GPU Context
+ * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type
+ * \param ring - \c [in] Specify ring index of the IP
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem);
+
+/**
+ * destroy semaphore
+ *
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
+
+/**
+ * Get the ASIC marketing name
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ *
+ * \return the constant string of the marketing name;
+ *	   NULL means the ASIC was not found
+*/
+const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);
+
+/**
+ * Create kernel sync object
+ *
+ * \param dev - \c [in] device handle
+ * \param flags - \c [in] flags that affect creation
+ * \param syncobj - \c [out] sync object handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
+ uint32_t flags,
+ uint32_t *syncobj);
+
+/**
+ * Create kernel sync object
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [out] sync object handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
+ uint32_t *syncobj);
+/**
+ * Destroy kernel sync object
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
+ uint32_t syncobj);
+
+/**
+ * Reset kernel sync objects to unsignalled state.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobjs - \c [in] array of sync object handles
+ * \param syncobj_count - \c [in] number of handles in syncobjs
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
+ const uint32_t *syncobjs, uint32_t syncobj_count);
+
+/**
+ * Signal kernel sync objects.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobjs - \c [in] array of sync object handles
+ * \param syncobj_count - \c [in] number of handles in syncobjs
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
+ const uint32_t *syncobjs, uint32_t syncobj_count);
+
+/**
+ * Signal kernel timeline sync objects.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobjs - \c [in] array of sync object handles
+ * \param points - \c [in] array of timeline points
+ * \param syncobj_count - \c [in] number of handles in syncobjs
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
+ const uint32_t *syncobjs,
+ uint64_t *points,
+ uint32_t syncobj_count);
+
+/**
+ * Wait for one or all sync objects to signal.
+ *
+ * \param dev - \c [in] self-explanatory
+ * \param handles - \c [in] array of sync object handles
+ * \param num_handles - \c [in] self-explanatory
+ * \param timeout_nsec - \c [in] self-explanatory
+ * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
+ * \param first_signaled - \c [in] self-explanatory
+ *
+ * \return 0 on success\n
+ * -ETIME - Timeout
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
+ uint32_t *handles, unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
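
A sync-object sketch; it exercises only the create/signal/wait/destroy calls declared above and assumes a flags value of 0 is acceptable when waiting on an already signalled syncobj:

    #include <stdint.h>
    #include "amdgpu.h"

    static int syncobj_roundtrip(amdgpu_device_handle dev)
    {
            uint32_t syncobj, first;
            int r = amdgpu_cs_create_syncobj2(dev, 0, &syncobj);

            if (r)
                    return r;
            /* Signal from the CPU, then wait; a real user would signal via a CS. */
            r = amdgpu_cs_syncobj_signal(dev, &syncobj, 1);
            if (!r)
                    r = amdgpu_cs_syncobj_wait(dev, &syncobj, 1,
                                               INT64_MAX, 0, &first);
            amdgpu_cs_destroy_syncobj(dev, syncobj);
            return r;
    }
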
+
+/**
+ * Wait for one or all sync objects on their points to signal.
+ *
+ * \param dev - \c [in] self-explanatory
+ * \param handles - \c [in] array of sync object handles
+ * \param points - \c [in] array of sync points to wait
+ * \param num_handles - \c [in] self-explanatory
+ * \param timeout_nsec - \c [in] self-explanatory
+ * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
+ * \param first_signaled - \c [in] self-explanatory
+ *
+ * \return 0 on success\n
+ * -ETIME - Timeout
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
+/**
+ * Query sync objects payloads.
+ *
+ * \param dev - \c [in] self-explanatory
+ * \param handles - \c [in] array of sync object handles
+ * \param points - \c [out] array of sync points returned, which represent
+ *                 the syncobj payloads.
+ * \param num_handles - \c [in] self-explanatory
+ *
+ * \return 0 on success\n
+ * -ETIME - Timeout
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles);
+/**
+ * Query sync objects last signaled or submitted point.
+ *
+ * \param dev - \c [in] self-explanatory
+ * \param handles - \c [in] array of sync object handles
+ * \param points - \c [out] array of sync points returned, which represent
+ *                 the syncobj payloads.
+ * \param num_handles - \c [in] self-explanatory
+ * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_QUERY_FLAGS_*
+ *
+ * \return 0 on success\n
+ * -ETIME - Timeout
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles, uint32_t flags);
+
+/**
+ * Export kernel sync object to shareable fd.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param shared_fd - \c [out] shared file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int *shared_fd);
+/**
+ * Import kernel sync object from shareable fd.
+ *
+ * \param dev - \c [in] device handle
+ * \param shared_fd - \c [in] shared file descriptor.
+ * \param syncobj - \c [out] sync object handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
+ int shared_fd,
+ uint32_t *syncobj);
+
+/**
+ * Export kernel sync object to a sync_file.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param sync_file_fd - \c [out] sync_file file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int *sync_file_fd);
+
+/**
+ * Import kernel sync object from a sync_file.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param sync_file_fd - \c [in] sync_file file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int sync_file_fd);
+/**
+ * Export kernel timeline sync object to a sync_file.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param point - \c [in] timeline point
+ * \param flags - \c [in] flags
+ * \param sync_file_fd - \c [out] sync_file file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ uint64_t point,
+ uint32_t flags,
+ int *sync_file_fd);
+
+/**
+ * Import kernel timeline sync object from a sync_file.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param point - \c [in] timeline point
+ * \param sync_file_fd - \c [in] sync_file file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ uint64_t point,
+ int sync_file_fd);
+
+/**
+ * Transfer between syncobjs.
+ *
+ * \param dev - \c [in] device handle
+ * \param dst_handle - \c [in] sync object handle
+ * \param dst_point - \c [in] timeline point, 0 means dst is a binary syncobj
+ * \param src_handle - \c [in] sync object handle
+ * \param src_point - \c [in] timeline point, 0 means src is a binary syncobj
+ * \param flags - \c [in] flags
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
+ uint32_t dst_handle,
+ uint64_t dst_point,
+ uint32_t src_handle,
+ uint64_t src_point,
+ uint32_t flags);
+
+/**
+ * Export an amdgpu fence as a handle (syncobj or fd).
+ *
+ * \param dev - \c [in] device handle
+ * \param fence - \c [in] amdgpu fence to export
+ * \param what - \c [in] AMDGPU_FENCE_TO_HANDLE_GET_{SYNCOBJ, FD}
+ * \param out_handle - \c [out] returned handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ */
+int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
+ struct amdgpu_cs_fence *fence,
+ uint32_t what,
+ uint32_t *out_handle);
+
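+/*
+ * Illustrative usage sketch (not part of the API): converting a submission
+ * fence into a sync object handle.  "fence" is assumed to have been filled
+ * in from a previous submission; error handling is omitted.
+ *
+ *	uint32_t syncobj;
+ *
+ *	amdgpu_cs_fence_to_handle(dev, &fence,
+ *				  AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ,
+ *				  &syncobj);
+ */
+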
+/**
+ * Submit a raw command submission to the kernel.
+ *
+ * \param dev - \c [in] device handle
+ * \param context - \c [in] context handle for context id
+ * \param bo_list_handle - \c [in] request bo list handle (0 for none)
+ * \param num_chunks - \c [in] number of CS chunks to submit
+ * \param chunks - \c [in] array of CS chunks
+ * \param seq_no - \c [out] output sequence number for submission.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+struct drm_amdgpu_cs_chunk;
+struct drm_amdgpu_cs_chunk_dep;
+struct drm_amdgpu_cs_chunk_data;
+
+int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ amdgpu_bo_list_handle bo_list_handle,
+ int num_chunks,
+ struct drm_amdgpu_cs_chunk *chunks,
+ uint64_t *seq_no);
+
+/**
+ * Submit a raw command submission to the kernel, using a raw BO list handle.
+ *
+ * \param dev - \c [in] device handle
+ * \param context - \c [in] context handle for context id
+ * \param bo_list_handle - \c [in] raw bo list handle (0 for none)
+ * \param num_chunks - \c [in] number of CS chunks to submit
+ * \param chunks - \c [in] array of CS chunks
+ * \param seq_no - \c [out] output sequence number for submission.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
+ */
+int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ uint32_t bo_list_handle,
+ int num_chunks,
+ struct drm_amdgpu_cs_chunk *chunks,
+ uint64_t *seq_no);
+
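+/*
+ * Illustrative usage sketch (not part of the API): submitting a single IB
+ * through the raw interface.  "ib_va" and "ib_size_dw" are assumed to
+ * describe an already mapped indirect buffer, "bo_list" comes from
+ * amdgpu_bo_list_create_raw() (or is 0 for none); error handling is omitted.
+ *
+ *	struct drm_amdgpu_cs_chunk_data data = {0};
+ *	struct drm_amdgpu_cs_chunk chunk = {0};
+ *	uint64_t seq_no;
+ *
+ *	data.ib_data.va_start = ib_va;
+ *	data.ib_data.ib_bytes = ib_size_dw * 4;
+ *	data.ib_data.ip_type = AMDGPU_HW_IP_GFX;
+ *
+ *	chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
+ *	chunk.length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+ *	chunk.chunk_data = (uint64_t)(uintptr_t)&data;
+ *
+ *	amdgpu_cs_submit_raw2(dev, context, bo_list, 1, &chunk, &seq_no);
+ */
+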
+void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
+ struct drm_amdgpu_cs_chunk_dep *dep);
+void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
+ struct drm_amdgpu_cs_chunk_data *data);
+
+/**
+ * Reserve a VMID for this device.
+ * \param dev - \c [in] device handle
+ * \param flags - \c [in] TBD
+ *
+ * \return 0 on success otherwise POSIX Error code
+*/
+int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags);
+
+/**
+ * Free the reserved VMID.
+ * \param dev - \c [in] device handle
+ * \param flags - \c [in] TBD
+ *
+ * \return 0 on success otherwise POSIX Error code
+*/
+int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* #ifdef _AMDGPU_H_ */
diff --git a/amdgpu/amdgpu_asic_id.c b/amdgpu/amdgpu_asic_id.c
new file mode 100644
index 0000000..a5007ff
--- /dev/null
+++ b/amdgpu/amdgpu_asic_id.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright © 2017 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include "xf86drm.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+
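+/*
+ * Each non-comment entry in the ID table is expected to be of the form
+ * "device_id,revision_id,product name", with the device and revision IDs
+ * in hexadecimal, as parsed below.
+ */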
+static int parse_one_line(struct amdgpu_device *dev, const char *line)
+{
+ char *buf, *saveptr;
+ char *s_did;
+ uint32_t did;
+ char *s_rid;
+ uint32_t rid;
+ char *s_name;
+ char *endptr;
+ int r = -EINVAL;
+
+ /* ignore empty line and commented line */
+ if (strlen(line) == 0 || line[0] == '#')
+ return -EAGAIN;
+
+ buf = strdup(line);
+ if (!buf)
+ return -ENOMEM;
+
+ /* device id */
+ s_did = strtok_r(buf, ",", &saveptr);
+ if (!s_did)
+ goto out;
+
+ did = strtol(s_did, &endptr, 16);
+ if (*endptr)
+ goto out;
+
+ if (did != dev->info.asic_id) {
+ r = -EAGAIN;
+ goto out;
+ }
+
+ /* revision id */
+ s_rid = strtok_r(NULL, ",", &saveptr);
+ if (!s_rid)
+ goto out;
+
+ rid = strtol(s_rid, &endptr, 16);
+ if (*endptr)
+ goto out;
+
+ if (rid != dev->info.pci_rev_id) {
+ r = -EAGAIN;
+ goto out;
+ }
+
+ /* marketing name */
+ s_name = strtok_r(NULL, ",", &saveptr);
+ if (!s_name)
+ goto out;
+
+ /* trim leading whitespaces or tabs */
+ while (isblank(*s_name))
+ s_name++;
+ if (strlen(s_name) == 0)
+ goto out;
+
+ dev->marketing_name = strdup(s_name);
+ if (dev->marketing_name)
+ r = 0;
+ else
+ r = -ENOMEM;
+
+out:
+ free(buf);
+
+ return r;
+}
+
+void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
+{
+ FILE *fp;
+ char *line = NULL;
+ size_t len = 0;
+ ssize_t n;
+ int line_num = 1;
+ int r = 0;
+
+ fp = fopen(AMDGPU_ASIC_ID_TABLE, "r");
+ if (!fp) {
+ fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE,
+ strerror(errno));
+ return;
+ }
+
+ /* 1st valid line is file version */
+ while ((n = getline(&line, &len, fp)) != -1) {
+ /* trim trailing newline */
+ if (line[n - 1] == '\n')
+ line[n - 1] = '\0';
+
+ /* ignore empty line and commented line */
+ if (strlen(line) == 0 || line[0] == '#') {
+ line_num++;
+ continue;
+ }
+
+ drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line);
+ break;
+ }
+
+ while ((n = getline(&line, &len, fp)) != -1) {
+ /* trim trailing newline */
+ if (line[n - 1] == '\n')
+ line[n - 1] = '\0';
+
+ r = parse_one_line(dev, line);
+ if (r != -EAGAIN)
+ break;
+
+ line_num++;
+ }
+
+ if (r == -EINVAL) {
+ fprintf(stderr, "Invalid format: %s: line %d: %s\n",
+ AMDGPU_ASIC_ID_TABLE, line_num, line);
+ } else if (r && r != -EAGAIN) {
+ fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n",
+ __func__, strerror(-r));
+ }
+
+ free(line);
+ fclose(fp);
+}
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
new file mode 100644
index 0000000..f4e0435
--- /dev/null
+++ b/amdgpu/amdgpu_bo.c
@@ -0,0 +1,791 @@
+/*
+ * Copyright © 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "util_math.h"
+
+static int amdgpu_bo_create(amdgpu_device_handle dev,
+ uint64_t size,
+ uint32_t handle,
+ amdgpu_bo_handle *buf_handle)
+{
+ struct amdgpu_bo *bo;
+ int r;
+
+ bo = calloc(1, sizeof(struct amdgpu_bo));
+ if (!bo)
+ return -ENOMEM;
+
+ r = handle_table_insert(&dev->bo_handles, handle, bo);
+ if (r) {
+ free(bo);
+ return r;
+ }
+
+ atomic_set(&bo->refcount, 1);
+ bo->dev = dev;
+ bo->alloc_size = size;
+ bo->handle = handle;
+ pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+
+ *buf_handle = bo;
+ return 0;
+}
+
+drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
+ struct amdgpu_bo_alloc_request *alloc_buffer,
+ amdgpu_bo_handle *buf_handle)
+{
+ union drm_amdgpu_gem_create args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.bo_size = alloc_buffer->alloc_size;
+ args.in.alignment = alloc_buffer->phys_alignment;
+
+ /* Set the placement. */
+ args.in.domains = alloc_buffer->preferred_heap;
+ args.in.domain_flags = alloc_buffer->flags;
+
+ /* Allocate the buffer with the preferred heap. */
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
+ &args, sizeof(args));
+ if (r)
+ goto out;
+
+ pthread_mutex_lock(&dev->bo_table_mutex);
+ r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
+ buf_handle);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ if (r) {
+ drmCloseBufferHandle(dev->fd, args.out.handle);
+ }
+
+out:
+ return r;
+}
+
+drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
+ struct amdgpu_bo_metadata *info)
+{
+ struct drm_amdgpu_gem_metadata args = {};
+
+ args.handle = bo->handle;
+ args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
+ args.data.flags = info->flags;
+ args.data.tiling_info = info->tiling_info;
+
+ if (info->size_metadata > sizeof(args.data.data))
+ return -EINVAL;
+
+ if (info->size_metadata) {
+ args.data.data_size_bytes = info->size_metadata;
+ memcpy(args.data.data, info->umd_metadata, info->size_metadata);
+ }
+
+ return drmCommandWriteRead(bo->dev->fd,
+ DRM_AMDGPU_GEM_METADATA,
+ &args, sizeof(args));
+}
+
+drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
+ struct amdgpu_bo_info *info)
+{
+ struct drm_amdgpu_gem_metadata metadata = {};
+ struct drm_amdgpu_gem_create_in bo_info = {};
+ struct drm_amdgpu_gem_op gem_op = {};
+ int r;
+
+ /* Validate the BO passed in */
+ if (!bo->handle)
+ return -EINVAL;
+
+ /* Query metadata. */
+ metadata.handle = bo->handle;
+ metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;
+
+ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
+ &metadata, sizeof(metadata));
+ if (r)
+ return r;
+
+ if (metadata.data.data_size_bytes >
+ sizeof(info->metadata.umd_metadata))
+ return -EINVAL;
+
+ /* Query buffer info. */
+ gem_op.handle = bo->handle;
+ gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
+ gem_op.value = (uintptr_t)&bo_info;
+
+ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
+ &gem_op, sizeof(gem_op));
+ if (r)
+ return r;
+
+ memset(info, 0, sizeof(*info));
+ info->alloc_size = bo_info.bo_size;
+ info->phys_alignment = bo_info.alignment;
+ info->preferred_heap = bo_info.domains;
+ info->alloc_flags = bo_info.domain_flags;
+ info->metadata.flags = metadata.data.flags;
+ info->metadata.tiling_info = metadata.data.tiling_info;
+
+ info->metadata.size_metadata = metadata.data.data_size_bytes;
+ if (metadata.data.data_size_bytes > 0)
+ memcpy(info->metadata.umd_metadata, metadata.data.data,
+ metadata.data.data_size_bytes);
+
+ return 0;
+}
+
+static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
+{
+ struct drm_gem_flink flink;
+ int fd, dma_fd;
+ uint32_t handle;
+ int r;
+
+ fd = bo->dev->fd;
+ handle = bo->handle;
+ if (bo->flink_name)
+ return 0;
+
+
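+ /* If the flink fd differs from the device fd, move the handle over to
+ * the flink fd via a dma-buf export/import first. */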
+ if (bo->dev->flink_fd != bo->dev->fd) {
+ r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
+ &dma_fd);
+ if (!r) {
+ r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
+ close(dma_fd);
+ }
+ if (r)
+ return r;
+ fd = bo->dev->flink_fd;
+ }
+ memset(&flink, 0, sizeof(flink));
+ flink.handle = handle;
+
+ r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (r)
+ return r;
+
+ bo->flink_name = flink.name;
+
+ if (bo->dev->flink_fd != bo->dev->fd)
+ drmCloseBufferHandle(bo->dev->flink_fd, handle);
+
+ pthread_mutex_lock(&bo->dev->bo_table_mutex);
+ r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
+ pthread_mutex_unlock(&bo->dev->bo_table_mutex);
+
+ return r;
+}
+
+drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
+ enum amdgpu_bo_handle_type type,
+ uint32_t *shared_handle)
+{
+ int r;
+
+ switch (type) {
+ case amdgpu_bo_handle_type_gem_flink_name:
+ r = amdgpu_bo_export_flink(bo);
+ if (r)
+ return r;
+
+ *shared_handle = bo->flink_name;
+ return 0;
+
+ case amdgpu_bo_handle_type_kms:
+ case amdgpu_bo_handle_type_kms_noimport:
+ *shared_handle = bo->handle;
+ return 0;
+
+ case amdgpu_bo_handle_type_dma_buf_fd:
+ return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
+ DRM_CLOEXEC | DRM_RDWR,
+ (int*)shared_handle);
+ }
+ return -EINVAL;
+}
+
+drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
+ enum amdgpu_bo_handle_type type,
+ uint32_t shared_handle,
+ struct amdgpu_bo_import_result *output)
+{
+ struct drm_gem_open open_arg = {};
+ struct amdgpu_bo *bo = NULL;
+ uint32_t handle = 0, flink_name = 0;
+ uint64_t alloc_size = 0;
+ int r = 0;
+ int dma_fd;
+ uint64_t dma_buf_size = 0;
+
+ /* We must maintain a list of pairs <handle, bo>, so that we always
+ * return the same amdgpu_bo instance for the same handle. */
+ pthread_mutex_lock(&dev->bo_table_mutex);
+
+ /* Convert a DMA buf handle to a KMS handle now. */
+ if (type == amdgpu_bo_handle_type_dma_buf_fd) {
+ off_t size;
+
+ /* Get a KMS handle. */
+ r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
+ if (r)
+ goto unlock;
+
+ /* Query the buffer size. */
+ size = lseek(shared_handle, 0, SEEK_END);
+ if (size == (off_t)-1) {
+ r = -errno;
+ goto free_bo_handle;
+ }
+ lseek(shared_handle, 0, SEEK_SET);
+
+ dma_buf_size = size;
+ shared_handle = handle;
+ }
+
+ /* If we have already created a buffer with this handle, find it. */
+ switch (type) {
+ case amdgpu_bo_handle_type_gem_flink_name:
+ bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
+ break;
+
+ case amdgpu_bo_handle_type_dma_buf_fd:
+ bo = handle_table_lookup(&dev->bo_handles, shared_handle);
+ break;
+
+ case amdgpu_bo_handle_type_kms:
+ case amdgpu_bo_handle_type_kms_noimport:
+ /* Importing a KMS handle is not allowed. */
+ r = -EPERM;
+ goto unlock;
+
+ default:
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ if (bo) {
+ /* The buffer already exists, just bump the refcount. */
+ atomic_inc(&bo->refcount);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+
+ output->buf_handle = bo;
+ output->alloc_size = bo->alloc_size;
+ return 0;
+ }
+
+ /* Open the handle. */
+ switch (type) {
+ case amdgpu_bo_handle_type_gem_flink_name:
+ open_arg.name = shared_handle;
+ r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ if (r)
+ goto unlock;
+
+ flink_name = shared_handle;
+ handle = open_arg.handle;
+ alloc_size = open_arg.size;
+ if (dev->flink_fd != dev->fd) {
+ r = drmPrimeHandleToFD(dev->flink_fd, handle,
+ DRM_CLOEXEC, &dma_fd);
+ if (r)
+ goto free_bo_handle;
+ r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
+ close(dma_fd);
+ if (r)
+ goto free_bo_handle;
+ r = drmCloseBufferHandle(dev->flink_fd,
+ open_arg.handle);
+ if (r)
+ goto free_bo_handle;
+ }
+ open_arg.handle = 0;
+ break;
+
+ case amdgpu_bo_handle_type_dma_buf_fd:
+ handle = shared_handle;
+ alloc_size = dma_buf_size;
+ break;
+
+ case amdgpu_bo_handle_type_kms:
+ case amdgpu_bo_handle_type_kms_noimport:
+ assert(0); /* unreachable */
+ }
+
+ /* Initialize it. */
+ r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
+ if (r)
+ goto free_bo_handle;
+
+ if (flink_name) {
+ bo->flink_name = flink_name;
+ r = handle_table_insert(&dev->bo_flink_names, flink_name,
+ bo);
+ if (r)
+ goto free_bo_handle;
+
+ }
+
+ output->buf_handle = bo;
+ output->alloc_size = bo->alloc_size;
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ return 0;
+
+free_bo_handle:
+ if (flink_name && open_arg.handle)
+ drmCloseBufferHandle(dev->flink_fd, open_arg.handle);
+
+ if (bo)
+ amdgpu_bo_free(bo);
+ else
+ drmCloseBufferHandle(dev->fd, handle);
+unlock:
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ return r;
+}
+
+drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
+{
+ struct amdgpu_device *dev;
+ struct amdgpu_bo *bo = buf_handle;
+
+ assert(bo != NULL);
+ dev = bo->dev;
+ pthread_mutex_lock(&dev->bo_table_mutex);
+
+ if (update_references(&bo->refcount, NULL)) {
+ /* Remove the buffer from the hash tables. */
+ handle_table_remove(&dev->bo_handles, bo->handle);
+
+ if (bo->flink_name)
+ handle_table_remove(&dev->bo_flink_names,
+ bo->flink_name);
+
+ /* Release CPU access. */
+ if (bo->cpu_map_count > 0) {
+ bo->cpu_map_count = 1;
+ amdgpu_bo_cpu_unmap(bo);
+ }
+
+ drmCloseBufferHandle(dev->fd, bo->handle);
+ pthread_mutex_destroy(&bo->cpu_access_mutex);
+ free(bo);
+ }
+
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+
+ return 0;
+}
+
+drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
+{
+ atomic_inc(&bo->refcount);
+}
+
+drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
+{
+ union drm_amdgpu_gem_mmap args;
+ void *ptr;
+ int r;
+
+ pthread_mutex_lock(&bo->cpu_access_mutex);
+
+ if (bo->cpu_ptr) {
+ /* already mapped */
+ assert(bo->cpu_map_count > 0);
+ bo->cpu_map_count++;
+ *cpu = bo->cpu_ptr;
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return 0;
+ }
+
+ assert(bo->cpu_map_count == 0);
+
+ memset(&args, 0, sizeof(args));
+
+ /* Query the buffer address (args.addr_ptr).
+ * The kernel driver ignores the offset and size parameters. */
+ args.in.handle = bo->handle;
+
+ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
+ sizeof(args));
+ if (r) {
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return r;
+ }
+
+ /* Map the buffer. */
+ ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bo->dev->fd, args.out.addr_ptr);
+ if (ptr == MAP_FAILED) {
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return -errno;
+ }
+
+ bo->cpu_ptr = ptr;
+ bo->cpu_map_count = 1;
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+
+ *cpu = ptr;
+ return 0;
+}
+
+drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
+{
+ int r;
+
+ pthread_mutex_lock(&bo->cpu_access_mutex);
+ assert(bo->cpu_map_count >= 0);
+
+ if (bo->cpu_map_count == 0) {
+ /* not mapped */
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return -EINVAL;
+ }
+
+ bo->cpu_map_count--;
+ if (bo->cpu_map_count > 0) {
+ /* mapped multiple times */
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return 0;
+ }
+
+ r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
+ bo->cpu_ptr = NULL;
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return r;
+}
+
+drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
+ struct amdgpu_buffer_size_alignments *info)
+{
+ info->size_local = dev->dev_info.pte_fragment_size;
+ info->size_remote = dev->dev_info.gart_page_size;
+ return 0;
+}
+
+drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
+ uint64_t timeout_ns,
+ bool *busy)
+{
+ union drm_amdgpu_gem_wait_idle args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.handle = bo->handle;
+ args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);
+
+ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
+ &args, sizeof(args));
+
+ if (r == 0) {
+ *busy = args.out.status;
+ return 0;
+ } else {
+ fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
+ return r;
+ }
+}
+
+drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
+ void *cpu,
+ uint64_t size,
+ amdgpu_bo_handle *buf_handle,
+ uint64_t *offset_in_bo)
+{
+ struct amdgpu_bo *bo = NULL;
+ uint32_t i;
+ int r = 0;
+
+ if (cpu == NULL || size == 0)
+ return -EINVAL;
+
+ /*
+ * Workaround for a buggy application which tries to import previously
+ * exposed CPU pointers. If we find a real world use case we should
+ * improve that by asking the kernel for the right handle.
+ */
+ pthread_mutex_lock(&dev->bo_table_mutex);
+ for (i = 0; i < dev->bo_handles.max_key; i++) {
+ bo = handle_table_lookup(&dev->bo_handles, i);
+ if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
+ continue;
+ if (cpu >= bo->cpu_ptr &&
+ cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
+ break;
+ }
+
+ if (i < dev->bo_handles.max_key) {
+ atomic_inc(&bo->refcount);
+ *buf_handle = bo;
+ *offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
+ } else {
+ *buf_handle = NULL;
+ *offset_in_bo = 0;
+ r = -ENXIO;
+ }
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+
+ return r;
+}
+
+drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
+ void *cpu,
+ uint64_t size,
+ amdgpu_bo_handle *buf_handle)
+{
+ int r;
+ struct drm_amdgpu_gem_userptr args;
+
+ args.addr = (uintptr_t)cpu;
+ args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
+ AMDGPU_GEM_USERPTR_VALIDATE;
+ args.size = size;
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
+ &args, sizeof(args));
+ if (r)
+ goto out;
+
+ pthread_mutex_lock(&dev->bo_table_mutex);
+ r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ if (r) {
+ drmCloseBufferHandle(dev->fd, args.handle);
+ }
+
+out:
+ return r;
+}
+
+drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
+ uint32_t number_of_buffers,
+ struct drm_amdgpu_bo_list_entry *buffers,
+ uint32_t *result)
+{
+ union drm_amdgpu_bo_list args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
+ args.in.bo_number = number_of_buffers;
+ args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+ if (!r)
+ *result = args.out.list_handle;
+ return r;
+}
+
+drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
+ uint32_t bo_list)
+{
+ union drm_amdgpu_bo_list args;
+
+ memset(&args, 0, sizeof(args));
+ args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
+ args.in.list_handle = bo_list;
+
+ return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+}
+
+drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios,
+ amdgpu_bo_list_handle *result)
+{
+ struct drm_amdgpu_bo_list_entry *list;
+ union drm_amdgpu_bo_list args;
+ unsigned i;
+ int r;
+
+ if (!number_of_resources)
+ return -EINVAL;
+
+ /* overflow check for multiplication */
+ if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
+ return -EINVAL;
+
+ list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
+ if (!list)
+ return -ENOMEM;
+
+ *result = malloc(sizeof(struct amdgpu_bo_list));
+ if (!*result) {
+ free(list);
+ return -ENOMEM;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
+ args.in.bo_number = number_of_resources;
+ args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;
+
+ for (i = 0; i < number_of_resources; i++) {
+ list[i].bo_handle = resources[i]->handle;
+ if (resource_prios)
+ list[i].bo_priority = resource_prios[i];
+ else
+ list[i].bo_priority = 0;
+ }
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+ free(list);
+ if (r) {
+ free(*result);
+ return r;
+ }
+
+ (*result)->dev = dev;
+ (*result)->handle = args.out.list_handle;
+ return 0;
+}
+
+drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
+{
+ union drm_amdgpu_bo_list args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
+ args.in.list_handle = list->handle;
+
+ r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+
+ if (!r)
+ free(list);
+
+ return r;
+}
+
+drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios)
+{
+ struct drm_amdgpu_bo_list_entry *list;
+ union drm_amdgpu_bo_list args;
+ unsigned i;
+ int r;
+
+ if (!number_of_resources)
+ return -EINVAL;
+
+ /* overflow check for multiplication */
+ if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
+ return -EINVAL;
+
+ list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
+ if (!list)
+ return -ENOMEM;
+
+ args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
+ args.in.list_handle = handle->handle;
+ args.in.bo_number = number_of_resources;
+ args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ args.in.bo_info_ptr = (uintptr_t)list;
+
+ for (i = 0; i < number_of_resources; i++) {
+ list[i].bo_handle = resources[i]->handle;
+ if (resource_prios)
+ list[i].bo_priority = resource_prios[i];
+ else
+ list[i].bo_priority = 0;
+ }
+
+ r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+ free(list);
+ return r;
+}
+
+drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops)
+{
+ amdgpu_device_handle dev = bo->dev;
+
+ size = ALIGN(size, getpagesize());
+
+ return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
+ AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE, ops);
+}
+
+drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
+ amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops)
+{
+ struct drm_amdgpu_gem_va va;
+ int r;
+
+ if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
+ ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
+ return -EINVAL;
+
+ memset(&va, 0, sizeof(va));
+ va.handle = bo ? bo->handle : 0;
+ va.operation = ops;
+ va.flags = flags;
+ va.va_address = addr;
+ va.offset_in_bo = offset;
+ va.map_size = size;
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
+
+ return r;
+}
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
new file mode 100644
index 0000000..638fd7d
--- /dev/null
+++ b/amdgpu/amdgpu_cs.c
@@ -0,0 +1,972 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/ioctl.h>
+#if HAVE_ALLOCA_H
+# include <alloca.h>
+#endif
+
+#include "xf86drm.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+
+static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem);
+static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);
+
+/**
+ * Create command submission context
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
+ * \param context - \c [out] GPU Context handle
+ *
+ * \return 0 on success otherwise POSIX Error code
+*/
+drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
+ uint32_t priority,
+ amdgpu_context_handle *context)
+{
+ struct amdgpu_context *gpu_context;
+ union drm_amdgpu_ctx args;
+ int i, j, k;
+ int r;
+
+ if (!dev || !context)
+ return -EINVAL;
+
+ gpu_context = calloc(1, sizeof(struct amdgpu_context));
+ if (!gpu_context)
+ return -ENOMEM;
+
+ gpu_context->dev = dev;
+
+ r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
+ if (r)
+ goto error;
+
+ /* Create the context */
+ memset(&args, 0, sizeof(args));
+ args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
+ args.in.priority = priority;
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
+ if (r)
+ goto error;
+
+ gpu_context->id = args.out.alloc.ctx_id;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
+ for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++)
+ for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++)
+ list_inithead(&gpu_context->sem_list[i][j][k]);
+ *context = (amdgpu_context_handle)gpu_context;
+
+ return 0;
+
+error:
+ pthread_mutex_destroy(&gpu_context->sequence_mutex);
+ free(gpu_context);
+ return r;
+}
+
+drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
+ amdgpu_context_handle *context)
+{
+ return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
+}
+
+/**
+ * Release command submission context
+ *
+ * \param dev - \c [in] amdgpu device handle
+ * \param context - \c [in] amdgpu context handle
+ *
+ * \return 0 on success otherwise POSIX Error code
+*/
+drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
+{
+ union drm_amdgpu_ctx args;
+ int i, j, k;
+ int r;
+
+ if (!context)
+ return -EINVAL;
+
+ pthread_mutex_destroy(&context->sequence_mutex);
+
+ /* now deal with kernel side */
+ memset(&args, 0, sizeof(args));
+ args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+ args.in.ctx_id = context->id;
+ r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
+ &args, sizeof(args));
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
+ for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
+ amdgpu_semaphore_handle sem;
+ LIST_FOR_EACH_ENTRY(sem, &context->sem_list[i][j][k], list) {
+ list_del(&sem->list);
+ amdgpu_cs_reset_sem(sem);
+ amdgpu_cs_unreference_sem(sem);
+ }
+ }
+ }
+ }
+ free(context);
+
+ return r;
+}
+
+drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ int master_fd,
+ unsigned priority)
+{
+ union drm_amdgpu_sched args;
+ int r;
+
+ if (!dev || !context || master_fd < 0)
+ return -EINVAL;
+
+ memset(&args, 0, sizeof(args));
+
+ args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
+ args.in.fd = dev->fd;
+ args.in.priority = priority;
+ args.in.ctx_id = context->id;
+
+ r = drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args, sizeof(args));
+ if (r)
+ return r;
+
+ return 0;
+}
+
+drm_public int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
+ uint32_t op,
+ uint32_t flags,
+ uint32_t *out_flags)
+{
+ union drm_amdgpu_ctx args;
+ int r;
+
+ if (!context)
+ return -EINVAL;
+
+ memset(&args, 0, sizeof(args));
+ args.in.op = op;
+ args.in.ctx_id = context->id;
+ args.in.flags = flags;
+ r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
+ &args, sizeof(args));
+ if (!r && out_flags)
+ *out_flags = args.out.pstate.flags;
+ return r;
+}
+
+drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
+ uint32_t *state, uint32_t *hangs)
+{
+ union drm_amdgpu_ctx args;
+ int r;
+
+ if (!context)
+ return -EINVAL;
+
+ memset(&args, 0, sizeof(args));
+ args.in.op = AMDGPU_CTX_OP_QUERY_STATE;
+ args.in.ctx_id = context->id;
+ r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
+ &args, sizeof(args));
+ if (!r) {
+ *state = args.out.state.reset_status;
+ *hangs = args.out.state.hangs;
+ }
+ return r;
+}
+
+drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
+ uint64_t *flags)
+{
+ union drm_amdgpu_ctx args;
+ int r;
+
+ if (!context)
+ return -EINVAL;
+
+ memset(&args, 0, sizeof(args));
+ args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
+ args.in.ctx_id = context->id;
+ r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
+ &args, sizeof(args));
+ if (!r)
+ *flags = args.out.state.flags;
+ return r;
+}
+
+/**
+ * Submit command to kernel DRM
+ * \param dev - \c [in] Device handle
+ * \param context - \c [in] GPU Context
+ * \param ibs_request - \c [in] Pointer to submission requests
+ * \param fence - \c [out] return fence for this submission
+ *
+ * \return 0 on success otherwise POSIX Error code
+ * \sa amdgpu_cs_submit()
+*/
+static int amdgpu_cs_submit_one(amdgpu_context_handle context,
+ struct amdgpu_cs_request *ibs_request)
+{
+ struct drm_amdgpu_cs_chunk *chunks;
+ struct drm_amdgpu_cs_chunk_data *chunk_data;
+ struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
+ struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
+ amdgpu_device_handle dev = context->dev;
+ struct list_head *sem_list;
+ amdgpu_semaphore_handle sem, tmp;
+ uint32_t i, size, num_chunks, bo_list_handle = 0, sem_count = 0;
+ uint64_t seq_no;
+ bool user_fence;
+ int r = 0;
+
+ if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ if (ibs_request->number_of_ibs == 0) {
+ ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
+ return 0;
+ }
+ user_fence = (ibs_request->fence_info.handle != NULL);
+
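+ /* Worst case: one chunk per IB, plus an optional user fence chunk, a
+ * dependency chunk and a semaphore dependency chunk. */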
+ size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;
+
+ chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
+
+ size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);
+
+ chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
+
+ if (ibs_request->resources)
+ bo_list_handle = ibs_request->resources->handle;
+ num_chunks = ibs_request->number_of_ibs;
+ /* IB chunks */
+ for (i = 0; i < ibs_request->number_of_ibs; i++) {
+ struct amdgpu_cs_ib_info *ib;
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
+
+ ib = &ibs_request->ibs[i];
+
+ chunk_data[i].ib_data._pad = 0;
+ chunk_data[i].ib_data.va_start = ib->ib_mc_address;
+ chunk_data[i].ib_data.ib_bytes = ib->size * 4;
+ chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
+ chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
+ chunk_data[i].ib_data.ring = ibs_request->ring;
+ chunk_data[i].ib_data.flags = ib->flags;
+ }
+
+ pthread_mutex_lock(&context->sequence_mutex);
+
+ if (user_fence) {
+ i = num_chunks++;
+
+ /* fence chunk */
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
+
+ /* fence bo handle */
+ chunk_data[i].fence_data.handle = ibs_request->fence_info.handle->handle;
+ /* offset */
+ chunk_data[i].fence_data.offset =
+ ibs_request->fence_info.offset * sizeof(uint64_t);
+ }
+
+ if (ibs_request->number_of_dependencies) {
+ dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) *
+ ibs_request->number_of_dependencies);
+ if (!dependencies) {
+ r = -ENOMEM;
+ goto error_unlock;
+ }
+
+ for (i = 0; i < ibs_request->number_of_dependencies; ++i) {
+ struct amdgpu_cs_fence *info = &ibs_request->dependencies[i];
+ struct drm_amdgpu_cs_chunk_dep *dep = &dependencies[i];
+ dep->ip_type = info->ip_type;
+ dep->ip_instance = info->ip_instance;
+ dep->ring = info->ring;
+ dep->ctx_id = info->context->id;
+ dep->handle = info->fence;
+ }
+
+ i = num_chunks++;
+
+ /* dependencies chunk */
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
+ * ibs_request->number_of_dependencies;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
+ }
+
+ sem_list = &context->sem_list[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring];
+ LIST_FOR_EACH_ENTRY(sem, sem_list, list)
+ sem_count++;
+ if (sem_count) {
+ sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
+ if (!sem_dependencies) {
+ r = -ENOMEM;
+ goto error_unlock;
+ }
+ sem_count = 0;
+ LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
+ struct amdgpu_cs_fence *info = &sem->signal_fence;
+ struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
+ dep->ip_type = info->ip_type;
+ dep->ip_instance = info->ip_instance;
+ dep->ring = info->ring;
+ dep->ctx_id = info->context->id;
+ dep->handle = info->fence;
+
+ list_del(&sem->list);
+ amdgpu_cs_reset_sem(sem);
+ amdgpu_cs_unreference_sem(sem);
+ }
+ i = num_chunks++;
+
+ /* dependencies chunk */
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
+ }
+
+ r = amdgpu_cs_submit_raw2(dev, context, bo_list_handle, num_chunks,
+ chunks, &seq_no);
+ if (r)
+ goto error_unlock;
+
+ ibs_request->seq_no = seq_no;
+ context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
+error_unlock:
+ pthread_mutex_unlock(&context->sequence_mutex);
+ return r;
+}
+
+drm_public int amdgpu_cs_submit(amdgpu_context_handle context,
+ uint64_t flags,
+ struct amdgpu_cs_request *ibs_request,
+ uint32_t number_of_requests)
+{
+ uint32_t i;
+ int r;
+
+ if (!context || !ibs_request)
+ return -EINVAL;
+
+ r = 0;
+ for (i = 0; i < number_of_requests; i++) {
+ r = amdgpu_cs_submit_one(context, ibs_request);
+ if (r)
+ break;
+ ibs_request++;
+ }
+
+ return r;
+}
+
+/**
+ * Calculate absolute timeout.
+ *
+ * \param timeout - \c [in] timeout in nanoseconds.
+ *
+ * \return absolute timeout in nanoseconds
+*/
+drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
+{
+ int r;
+
+ if (timeout != AMDGPU_TIMEOUT_INFINITE) {
+ struct timespec current;
+ uint64_t current_ns;
+ r = clock_gettime(CLOCK_MONOTONIC, &current);
+ if (r) {
+ fprintf(stderr, "clock_gettime() returned error (%d)!", errno);
+ return AMDGPU_TIMEOUT_INFINITE;
+ }
+
+ current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
+ current_ns += current.tv_nsec;
+ timeout += current_ns;
+ if (timeout < current_ns)
+ timeout = AMDGPU_TIMEOUT_INFINITE;
+ }
+ return timeout;
+}
+
+static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
+ unsigned ip,
+ unsigned ip_instance,
+ uint32_t ring,
+ uint64_t handle,
+ uint64_t timeout_ns,
+ uint64_t flags,
+ bool *busy)
+{
+ amdgpu_device_handle dev = context->dev;
+ union drm_amdgpu_wait_cs args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.handle = handle;
+ args.in.ip_type = ip;
+ args.in.ip_instance = ip_instance;
+ args.in.ring = ring;
+ args.in.ctx_id = context->id;
+
+ if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
+ args.in.timeout = timeout_ns;
+ else
+ args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);
+
+ r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
+ if (r)
+ return -errno;
+
+ *busy = args.out.status;
+ return 0;
+}
+
+drm_public int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
+ uint64_t timeout_ns,
+ uint64_t flags,
+ uint32_t *expired)
+{
+ bool busy = true;
+ int r;
+
+ if (!fence || !expired || !fence->context)
+ return -EINVAL;
+ if (fence->ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (fence->ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ if (fence->fence == AMDGPU_NULL_SUBMIT_SEQ) {
+ *expired = true;
+ return 0;
+ }
+
+ *expired = false;
+
+ r = amdgpu_ioctl_wait_cs(fence->context, fence->ip_type,
+ fence->ip_instance, fence->ring,
+ fence->fence, timeout_ns, flags, &busy);
+
+ if (!r && !busy)
+ *expired = true;
+
+ return r;
+}
+
+static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
+ uint32_t fence_count,
+ bool wait_all,
+ uint64_t timeout_ns,
+ uint32_t *status,
+ uint32_t *first)
+{
+ struct drm_amdgpu_fence *drm_fences;
+ amdgpu_device_handle dev = fences[0].context->dev;
+ union drm_amdgpu_wait_fences args;
+ int r;
+ uint32_t i;
+
+ drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
+ for (i = 0; i < fence_count; i++) {
+ drm_fences[i].ctx_id = fences[i].context->id;
+ drm_fences[i].ip_type = fences[i].ip_type;
+ drm_fences[i].ip_instance = fences[i].ip_instance;
+ drm_fences[i].ring = fences[i].ring;
+ drm_fences[i].seq_no = fences[i].fence;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.in.fences = (uint64_t)(uintptr_t)drm_fences;
+ args.in.fence_count = fence_count;
+ args.in.wait_all = wait_all;
+ args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);
+
+ r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
+ if (r)
+ return -errno;
+
+ *status = args.out.status;
+
+ if (first)
+ *first = args.out.first_signaled;
+
+ return 0;
+}
+
+drm_public int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
+ uint32_t fence_count,
+ bool wait_all,
+ uint64_t timeout_ns,
+ uint32_t *status,
+ uint32_t *first)
+{
+ uint32_t i;
+
+ /* Sanity check */
+ if (!fences || !status || !fence_count)
+ return -EINVAL;
+
+ for (i = 0; i < fence_count; i++) {
+ if (NULL == fences[i].context)
+ return -EINVAL;
+ if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ }
+
+ *status = 0;
+
+ return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
+ timeout_ns, status, first);
+}
+
+drm_public int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
+{
+ struct amdgpu_semaphore *gpu_semaphore;
+
+ if (!sem)
+ return -EINVAL;
+
+ gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
+ if (!gpu_semaphore)
+ return -ENOMEM;
+
+ atomic_set(&gpu_semaphore->refcount, 1);
+ *sem = gpu_semaphore;
+
+ return 0;
+}
+
+drm_public int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem)
+{
+ if (!ctx || !sem)
+ return -EINVAL;
+ if (ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ /* sem has been signaled */
+ if (sem->signal_fence.context)
+ return -EINVAL;
+ pthread_mutex_lock(&ctx->sequence_mutex);
+ sem->signal_fence.context = ctx;
+ sem->signal_fence.ip_type = ip_type;
+ sem->signal_fence.ip_instance = ip_instance;
+ sem->signal_fence.ring = ring;
+ sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
+ update_references(NULL, &sem->refcount);
+ pthread_mutex_unlock(&ctx->sequence_mutex);
+ return 0;
+}
+
+drm_public int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem)
+{
+ if (!ctx || !sem)
+ return -EINVAL;
+ if (ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ /* must signal first */
+ if (!sem->signal_fence.context)
+ return -EINVAL;
+
+ pthread_mutex_lock(&ctx->sequence_mutex);
+ list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
+ pthread_mutex_unlock(&ctx->sequence_mutex);
+ return 0;
+}
+
+static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
+{
+ if (!sem || !sem->signal_fence.context)
+ return -EINVAL;
+
+ sem->signal_fence.context = NULL;
+ sem->signal_fence.ip_type = 0;
+ sem->signal_fence.ip_instance = 0;
+ sem->signal_fence.ring = 0;
+ sem->signal_fence.fence = 0;
+
+ return 0;
+}
+
+static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
+{
+ if (!sem)
+ return -EINVAL;
+
+ if (update_references(&sem->refcount, NULL))
+ free(sem);
+ return 0;
+}
+
+drm_public int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
+{
+ return amdgpu_cs_unreference_sem(sem);
+}
+
+drm_public int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
+ uint32_t flags,
+ uint32_t *handle)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjCreate(dev->fd, flags, handle);
+}
+
+drm_public int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
+ uint32_t *handle)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjCreate(dev->fd, 0, handle);
+}
+
+drm_public int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
+ uint32_t handle)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjDestroy(dev->fd, handle);
+}
+
+drm_public int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
+ const uint32_t *syncobjs,
+ uint32_t syncobj_count)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjReset(dev->fd, syncobjs, syncobj_count);
+}
+
+drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
+ const uint32_t *syncobjs,
+ uint32_t syncobj_count)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
+}
+
+drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
+ const uint32_t *syncobjs,
+ uint64_t *points,
+ uint32_t syncobj_count)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjTimelineSignal(dev->fd, syncobjs,
+ points, syncobj_count);
+}
+
+drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
+ uint32_t *handles, unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjWait(dev->fd, handles, num_handles, timeout_nsec,
+ flags, first_signaled);
+}
+
+drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
+ timeout_nsec, flags, first_signaled);
+}
+
+drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjQuery(dev->fd, handles, points, num_handles);
+}
+
+drm_public int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles, uint32_t flags)
+{
+ if (!dev)
+ return -EINVAL;
+
+ return drmSyncobjQuery2(dev->fd, handles, points, num_handles, flags);
+}
+
+drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
+ uint32_t handle,
+ int *shared_fd)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
+}
+
+drm_public int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
+ int shared_fd,
+ uint32_t *handle)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
+}
+
+drm_public int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int *sync_file_fd)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
+}
+
+drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int sync_file_fd)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
+}
+
+drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ uint64_t point,
+ uint32_t flags,
+ int *sync_file_fd)
+{
+ uint32_t binary_handle;
+ int ret;
+
+ if (NULL == dev)
+ return -EINVAL;
+
+ if (!point)
+ return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
+
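+ /* For a timeline point, transfer the fence at that point into a
+ * temporary binary syncobj and export that as the sync_file. */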
+ ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
+ if (ret)
+ return ret;
+
+ ret = drmSyncobjTransfer(dev->fd, binary_handle, 0,
+ syncobj, point, flags);
+ if (ret)
+ goto out;
+ ret = drmSyncobjExportSyncFile(dev->fd, binary_handle, sync_file_fd);
+out:
+ drmSyncobjDestroy(dev->fd, binary_handle);
+ return ret;
+}
+
+drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ uint64_t point,
+ int sync_file_fd)
+{
+ uint32_t binary_handle;
+ int ret;
+
+ if (NULL == dev)
+ return -EINVAL;
+
+ if (!point)
+ return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
+
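+ /* Import the sync_file into a temporary binary syncobj first, then
+ * transfer its fence to the requested timeline point. */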
+ ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
+ if (ret)
+ return ret;
+ ret = drmSyncobjImportSyncFile(dev->fd, binary_handle, sync_file_fd);
+ if (ret)
+ goto out;
+ ret = drmSyncobjTransfer(dev->fd, syncobj, point,
+ binary_handle, 0, 0);
+out:
+ drmSyncobjDestroy(dev->fd, binary_handle);
+ return ret;
+}
+
+drm_public int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
+ uint32_t dst_handle,
+ uint64_t dst_point,
+ uint32_t src_handle,
+ uint64_t src_point,
+ uint32_t flags)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjTransfer(dev->fd,
+ dst_handle, dst_point,
+ src_handle, src_point,
+ flags);
+}
+
+drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ amdgpu_bo_list_handle bo_list_handle,
+ int num_chunks,
+ struct drm_amdgpu_cs_chunk *chunks,
+ uint64_t *seq_no)
+{
+ union drm_amdgpu_cs cs;
+ uint64_t *chunk_array;
+ int i, r;
+ if (num_chunks == 0)
+ return -EINVAL;
+
+ memset(&cs, 0, sizeof(cs));
+ chunk_array = alloca(sizeof(uint64_t) * num_chunks);
+ for (i = 0; i < num_chunks; i++)
+ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+ cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
+ cs.in.ctx_id = context->id;
+ cs.in.bo_list_handle = bo_list_handle ? bo_list_handle->handle : 0;
+ cs.in.num_chunks = num_chunks;
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
+ &cs, sizeof(cs));
+ if (r)
+ return r;
+
+ if (seq_no)
+ *seq_no = cs.out.handle;
+ return 0;
+}
+
+drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ uint32_t bo_list_handle,
+ int num_chunks,
+ struct drm_amdgpu_cs_chunk *chunks,
+ uint64_t *seq_no)
+{
+ union drm_amdgpu_cs cs;
+ uint64_t *chunk_array;
+ int i, r;
+
+ memset(&cs, 0, sizeof(cs));
+ chunk_array = alloca(sizeof(uint64_t) * num_chunks);
+ for (i = 0; i < num_chunks; i++)
+ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+ cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
+ cs.in.ctx_id = context->id;
+ cs.in.bo_list_handle = bo_list_handle;
+ cs.in.num_chunks = num_chunks;
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
+ &cs, sizeof(cs));
+ if (!r && seq_no)
+ *seq_no = cs.out.handle;
+ return r;
+}
+
+drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
+ struct drm_amdgpu_cs_chunk_data *data)
+{
+ data->fence_data.handle = fence_info->handle->handle;
+ data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
+}
+
+drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
+ struct drm_amdgpu_cs_chunk_dep *dep)
+{
+ dep->ip_type = fence->ip_type;
+ dep->ip_instance = fence->ip_instance;
+ dep->ring = fence->ring;
+ dep->ctx_id = fence->context->id;
+ dep->handle = fence->fence;
+}
+
+drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
+ struct amdgpu_cs_fence *fence,
+ uint32_t what,
+ uint32_t *out_handle)
+{
+ union drm_amdgpu_fence_to_handle fth;
+ int r;
+
+ memset(&fth, 0, sizeof(fth));
+ fth.in.fence.ctx_id = fence->context->id;
+ fth.in.fence.ip_type = fence->ip_type;
+ fth.in.fence.ip_instance = fence->ip_instance;
+ fth.in.fence.ring = fence->ring;
+ fth.in.fence.seq_no = fence->fence;
+ fth.in.what = what;
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_FENCE_TO_HANDLE,
+ &fth, sizeof(fth));
+ if (r == 0)
+ *out_handle = fth.out.handle;
+ return r;
+}
diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
new file mode 100644
index 0000000..aeb5e3c
--- /dev/null
+++ b/amdgpu/amdgpu_device.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * \file amdgpu_device.c
+ *
+ * Implementation of functions for AMD GPU device
+ *
+ */
+
+#include <sys/stat.h>
+#include <errno.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "xf86drm.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "util_math.h"
+
+#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
+
+static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
+static amdgpu_device_handle dev_list;
+
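+/* Compare two fds by the primary device node they resolve to, so that
+ * different fds referring to the same GPU are treated as one device. */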
+static int fd_compare(int fd1, int fd2)
+{
+ char *name1 = drmGetPrimaryDeviceNameFromFd(fd1);
+ char *name2 = drmGetPrimaryDeviceNameFromFd(fd2);
+ int result;
+
+ if (name1 == NULL || name2 == NULL) {
+ free(name1);
+ free(name2);
+ return 0;
+ }
+
+ result = strcmp(name1, name2);
+ free(name1);
+ free(name2);
+
+ return result;
+}
+
+/**
+* Check whether the fd is authenticated.
+*
+* \param fd - \c [in] File descriptor for AMD GPU device
+* \param auth - \c [out] Set to whether the fd is authenticated:
+*               a render node fd always reports auth = 0,
+*               a legacy (primary node) fd reports the authentication
+*               state queried from the kernel
+*
+* \return 0 on success\n
+* >0 - AMD specific error code\n
+* <0 - Negative POSIX Error code
+*/
+static int amdgpu_get_auth(int fd, int *auth)
+{
+ int r = 0;
+ drm_client_t client = {};
+
+ if (drmGetNodeTypeFromFd(fd) == DRM_NODE_RENDER)
+ *auth = 0;
+ else {
+ client.idx = 0;
+ r = drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client);
+ if (!r)
+ *auth = client.auth;
+ }
+ return r;
+}
+
+static void amdgpu_device_free_internal(amdgpu_device_handle dev)
+{
+ amdgpu_device_handle *node = &dev_list;
+
+ pthread_mutex_lock(&dev_mutex);
+ while (*node != dev && (*node)->next)
+ node = &(*node)->next;
+ *node = (*node)->next;
+ pthread_mutex_unlock(&dev_mutex);
+
+ close(dev->fd);
+ if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
+ close(dev->flink_fd);
+
+ amdgpu_vamgr_deinit(&dev->vamgr_32);
+ amdgpu_vamgr_deinit(&dev->vamgr);
+ amdgpu_vamgr_deinit(&dev->vamgr_high_32);
+ amdgpu_vamgr_deinit(&dev->vamgr_high);
+ handle_table_fini(&dev->bo_handles);
+ handle_table_fini(&dev->bo_flink_names);
+ pthread_mutex_destroy(&dev->bo_table_mutex);
+ free(dev->marketing_name);
+ free(dev);
+}
+
+/**
+ * Assignment between two amdgpu_device pointers with reference counting.
+ *
+ * Usage:
+ * struct amdgpu_device *dst = ... , *src = ...;
+ *
+ * dst = src;
+ * // No reference counting. Only use this when you need to move
+ * // a reference from one pointer to another.
+ *
+ * amdgpu_device_reference(&dst, src);
+ * // Reference counters are updated. dst is decremented and src is
+ * // incremented. dst is freed if its reference counter is 0.
+ */
+static void amdgpu_device_reference(struct amdgpu_device **dst,
+ struct amdgpu_device *src)
+{
+ if (update_references(&(*dst)->refcount, &src->refcount))
+ amdgpu_device_free_internal(*dst);
+ *dst = src;
+}
+
+drm_public int amdgpu_device_initialize(int fd,
+ uint32_t *major_version,
+ uint32_t *minor_version,
+ amdgpu_device_handle *device_handle)
+{
+ struct amdgpu_device *dev;
+ drmVersionPtr version;
+ int r;
+ int flag_auth = 0;
+ int flag_authexist = 0;
+ uint32_t accel_working = 0;
+ uint64_t start, max;
+
+ *device_handle = NULL;
+
+ pthread_mutex_lock(&dev_mutex);
+ r = amdgpu_get_auth(fd, &flag_auth);
+ if (r) {
+ fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
+ __func__, r);
+ pthread_mutex_unlock(&dev_mutex);
+ return r;
+ }
+
+ for (dev = dev_list; dev; dev = dev->next)
+ if (fd_compare(dev->fd, fd) == 0)
+ break;
+
+ if (dev) {
+ r = amdgpu_get_auth(dev->fd, &flag_authexist);
+ if (r) {
+ fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
+ __func__, r);
+ pthread_mutex_unlock(&dev_mutex);
+ return r;
+ }
+ if ((flag_auth) && (!flag_authexist)) {
+ dev->flink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
+ }
+ *major_version = dev->major_version;
+ *minor_version = dev->minor_version;
+ amdgpu_device_reference(device_handle, dev);
+ pthread_mutex_unlock(&dev_mutex);
+ return 0;
+ }
+
+ dev = calloc(1, sizeof(struct amdgpu_device));
+ if (!dev) {
+ fprintf(stderr, "%s: calloc failed\n", __func__);
+ pthread_mutex_unlock(&dev_mutex);
+ return -ENOMEM;
+ }
+
+ dev->fd = -1;
+ dev->flink_fd = -1;
+
+ atomic_set(&dev->refcount, 1);
+
+ version = drmGetVersion(fd);
+ if (version->version_major != 3) {
+ fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
+ "only compatible with 3.x.x.\n",
+ __func__,
+ version->version_major,
+ version->version_minor,
+ version->version_patchlevel);
+ drmFreeVersion(version);
+ r = -EBADF;
+ goto cleanup;
+ }
+
+ dev->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
+ dev->flink_fd = dev->fd;
+ dev->major_version = version->version_major;
+ dev->minor_version = version->version_minor;
+ drmFreeVersion(version);
+
+ pthread_mutex_init(&dev->bo_table_mutex, NULL);
+
+ /* Check if acceleration is working. */
+ r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
+ if (r) {
+ fprintf(stderr, "%s: amdgpu_query_info(ACCEL_WORKING) failed (%i)\n",
+ __func__, r);
+ goto cleanup;
+ }
+ if (!accel_working) {
+ fprintf(stderr, "%s: AMDGPU_INFO_ACCEL_WORKING = 0\n", __func__);
+ r = -EBADF;
+ goto cleanup;
+ }
+
+ r = amdgpu_query_gpu_info_init(dev);
+ if (r) {
+ fprintf(stderr, "%s: amdgpu_query_gpu_info_init failed\n", __func__);
+ goto cleanup;
+ }
+
+ start = dev->dev_info.virtual_address_offset;
+ max = MIN2(dev->dev_info.virtual_address_max, 0x100000000ULL);
+ amdgpu_vamgr_init(&dev->vamgr_32, start, max,
+ dev->dev_info.virtual_address_alignment);
+
+ start = max;
+ max = MAX2(dev->dev_info.virtual_address_max, 0x100000000ULL);
+ amdgpu_vamgr_init(&dev->vamgr, start, max,
+ dev->dev_info.virtual_address_alignment);
+
+ start = dev->dev_info.high_va_offset;
+ max = MIN2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
+ 0x100000000ULL);
+ amdgpu_vamgr_init(&dev->vamgr_high_32, start, max,
+ dev->dev_info.virtual_address_alignment);
+
+ start = max;
+ max = MAX2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
+ 0x100000000ULL);
+ amdgpu_vamgr_init(&dev->vamgr_high, start, max,
+ dev->dev_info.virtual_address_alignment);
+
+ amdgpu_parse_asic_ids(dev);
+
+ *major_version = dev->major_version;
+ *minor_version = dev->minor_version;
+ *device_handle = dev;
+ dev->next = dev_list;
+ dev_list = dev;
+ pthread_mutex_unlock(&dev_mutex);
+
+ return 0;
+
+cleanup:
+ if (dev->fd >= 0)
+ close(dev->fd);
+ free(dev);
+ pthread_mutex_unlock(&dev_mutex);
+ return r;
+}
+
+drm_public int amdgpu_device_deinitialize(amdgpu_device_handle dev)
+{
+ amdgpu_device_reference(&dev, NULL);
+ return 0;
+}
+
+drm_public int amdgpu_device_get_fd(amdgpu_device_handle device_handle)
+{
+ return device_handle->fd;
+}
+
+drm_public const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
+{
+ if (dev->marketing_name)
+ return dev->marketing_name;
+ else
+ return "AMD Radeon Graphics";
+}
+
+drm_public int amdgpu_query_sw_info(amdgpu_device_handle dev,
+ enum amdgpu_sw_info info,
+ void *value)
+{
+ uint32_t *val32 = (uint32_t*)value;
+
+ switch (info) {
+ case amdgpu_sw_info_address32_hi:
+ if (dev->vamgr_high_32.va_max)
+ *val32 = (dev->vamgr_high_32.va_max - 1) >> 32;
+ else
+ *val32 = (dev->vamgr_32.va_max - 1) >> 32;
+ return 0;
+ }
+ return -EINVAL;
+}
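
Editorial usage sketch, not part of the upstream diff: a minimal open/initialize/deinitialize cycle for the device API above. The render node path and the compile setup (pkg-config libdrm_amdgpu include flags) are assumptions that may differ per system.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <amdgpu.h>

int main(void)
{
	amdgpu_device_handle dev;
	uint32_t major, minor;
	int fd, r;

	/* Any amdgpu DRM node works; render nodes need no authentication. */
	fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return 1;

	r = amdgpu_device_initialize(fd, &major, &minor, &dev);
	if (r) {
		close(fd);
		return 1;
	}

	printf("DRM interface %u.%u, GPU: %s\n", major, minor,
	       amdgpu_get_marketing_name(dev));

	amdgpu_device_deinitialize(dev);
	/* The library duplicated the fd internally, so the caller still owns this one. */
	close(fd);
	return 0;
}
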
diff --git a/amdgpu/amdgpu_gpu_info.c b/amdgpu/amdgpu_gpu_info.c
new file mode 100644
index 0000000..9f8695c
--- /dev/null
+++ b/amdgpu/amdgpu_gpu_info.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright © 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <errno.h>
+#include <string.h>
+
+#include "amdgpu.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "xf86drm.h"
+
+drm_public int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
+ unsigned size, void *value)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)value;
+ request.return_size = size;
+ request.query = info_id;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+drm_public int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
+ int32_t *result)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)result;
+ request.return_size = sizeof(*result);
+ request.query = AMDGPU_INFO_CRTC_FROM_ID;
+ request.mode_crtc.id = id;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+drm_public int amdgpu_read_mm_registers(amdgpu_device_handle dev,
+ unsigned dword_offset, unsigned count, uint32_t instance,
+ uint32_t flags, uint32_t *values)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)values;
+ request.return_size = count * sizeof(uint32_t);
+ request.query = AMDGPU_INFO_READ_MMR_REG;
+ request.read_mmr_reg.dword_offset = dword_offset;
+ request.read_mmr_reg.count = count;
+ request.read_mmr_reg.instance = instance;
+ request.read_mmr_reg.flags = flags;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+drm_public int amdgpu_query_hw_ip_count(amdgpu_device_handle dev,
+ unsigned type,
+ uint32_t *count)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)count;
+ request.return_size = sizeof(*count);
+ request.query = AMDGPU_INFO_HW_IP_COUNT;
+ request.query_hw_ip.type = type;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+drm_public int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
+ unsigned ip_instance,
+ struct drm_amdgpu_info_hw_ip *info)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)info;
+ request.return_size = sizeof(*info);
+ request.query = AMDGPU_INFO_HW_IP_INFO;
+ request.query_hw_ip.type = type;
+ request.query_hw_ip.ip_instance = ip_instance;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+drm_public int amdgpu_query_firmware_version(amdgpu_device_handle dev,
+ unsigned fw_type, unsigned ip_instance, unsigned index,
+ uint32_t *version, uint32_t *feature)
+{
+ struct drm_amdgpu_info request;
+ struct drm_amdgpu_info_firmware firmware = {};
+ int r;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)&firmware;
+ request.return_size = sizeof(firmware);
+ request.query = AMDGPU_INFO_FW_VERSION;
+ request.query_fw.fw_type = fw_type;
+ request.query_fw.ip_instance = ip_instance;
+ request.query_fw.index = index;
+
+ r = drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+ if (r)
+ return r;
+
+ *version = firmware.ver;
+ *feature = firmware.feature;
+ return 0;
+}
+
+drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
+{
+ int r, i;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev->dev_info),
+ &dev->dev_info);
+ if (r)
+ return r;
+
+ dev->info.asic_id = dev->dev_info.device_id;
+ dev->info.chip_rev = dev->dev_info.chip_rev;
+ dev->info.chip_external_rev = dev->dev_info.external_rev;
+ dev->info.family_id = dev->dev_info.family;
+ dev->info.max_engine_clk = dev->dev_info.max_engine_clock;
+ dev->info.max_memory_clk = dev->dev_info.max_memory_clock;
+ dev->info.gpu_counter_freq = dev->dev_info.gpu_counter_freq;
+ dev->info.enabled_rb_pipes_mask = dev->dev_info.enabled_rb_pipes_mask;
+ dev->info.rb_pipes = dev->dev_info.num_rb_pipes;
+ dev->info.ids_flags = dev->dev_info.ids_flags;
+ dev->info.num_hw_gfx_contexts = dev->dev_info.num_hw_gfx_contexts;
+ dev->info.num_shader_engines = dev->dev_info.num_shader_engines;
+ dev->info.num_shader_arrays_per_engine =
+ dev->dev_info.num_shader_arrays_per_engine;
+ dev->info.vram_type = dev->dev_info.vram_type;
+ dev->info.vram_bit_width = dev->dev_info.vram_bit_width;
+ dev->info.ce_ram_size = dev->dev_info.ce_ram_size;
+ dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
+ dev->info.pci_rev_id = dev->dev_info.pci_rev;
+
+ if (dev->info.family_id < AMDGPU_FAMILY_AI) {
+ for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
+ unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
+ (AMDGPU_INFO_MMR_SH_INDEX_MASK <<
+ AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
+
+ r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
+ &dev->info.backend_disable[i]);
+ if (r)
+ return r;
+ /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
+ dev->info.backend_disable[i] =
+ (dev->info.backend_disable[i] >> 16) & 0xff;
+
+ r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
+ &dev->info.pa_sc_raster_cfg[i]);
+ if (r)
+ return r;
+
+ if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
+ r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
+ &dev->info.pa_sc_raster_cfg1[i]);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
+ &dev->info.gb_addr_cfg);
+ if (r)
+ return r;
+
+ if (dev->info.family_id < AMDGPU_FAMILY_AI) {
+ r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
+ dev->info.gb_tile_mode);
+ if (r)
+ return r;
+
+ if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
+ r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
+ dev->info.gb_macro_tile_mode);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
+ &dev->info.mc_arb_ramcfg);
+ if (r)
+ return r;
+ }
+
+ dev->info.cu_active_number = dev->dev_info.cu_active_number;
+ dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
+ memcpy(&dev->info.cu_bitmap[0][0], &dev->dev_info.cu_bitmap[0][0], sizeof(dev->info.cu_bitmap));
+
+ /* TODO: info->max_quad_shader_pipes is not set */
+ /* TODO: info->avail_quad_shader_pipes is not set */
+ /* TODO: info->cache_entries_per_quad_pipe is not set */
+ return 0;
+}
+
+drm_public int amdgpu_query_gpu_info(amdgpu_device_handle dev,
+ struct amdgpu_gpu_info *info)
+{
+ if (!dev || !info)
+ return -EINVAL;
+
+ /* Get ASIC info */
+ *info = dev->info;
+
+ return 0;
+}
+
+drm_public int amdgpu_query_heap_info(amdgpu_device_handle dev,
+ uint32_t heap,
+ uint32_t flags,
+ struct amdgpu_heap_info *info)
+{
+ struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
+ int r;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_GTT,
+ sizeof(vram_gtt_info), &vram_gtt_info);
+ if (r)
+ return r;
+
+ /* Get heap information */
+ switch (heap) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ /* query only the CPU-visible VRAM heap */
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ info->heap_size = vram_gtt_info.vram_cpu_accessible_size;
+ else /* query total vram heap */
+ info->heap_size = vram_gtt_info.vram_size;
+
+ info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;
+
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ r = amdgpu_query_info(dev, AMDGPU_INFO_VIS_VRAM_USAGE,
+ sizeof(info->heap_usage),
+ &info->heap_usage);
+ else
+ r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_USAGE,
+ sizeof(info->heap_usage),
+ &info->heap_usage);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ info->heap_size = vram_gtt_info.gtt_size;
+ info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_GTT_USAGE,
+ sizeof(info->heap_usage),
+ &info->heap_usage);
+ if (r)
+ return r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+drm_public int amdgpu_query_gds_info(amdgpu_device_handle dev,
+ struct amdgpu_gds_resource_info *gds_info)
+{
+ struct drm_amdgpu_info_gds gds_config = {};
+ int r;
+
+ if (!gds_info)
+ return -EINVAL;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
+ sizeof(gds_config), &gds_config);
+ if (r)
+ return r;
+
+ gds_info->gds_gfx_partition_size = gds_config.gds_gfx_partition_size;
+ gds_info->compute_partition_size = gds_config.compute_partition_size;
+ gds_info->gds_total_size = gds_config.gds_total_size;
+ gds_info->gws_per_gfx_partition = gds_config.gws_per_gfx_partition;
+ gds_info->gws_per_compute_partition = gds_config.gws_per_compute_partition;
+ gds_info->oa_per_gfx_partition = gds_config.oa_per_gfx_partition;
+ gds_info->oa_per_compute_partition = gds_config.oa_per_compute_partition;
+
+ return 0;
+}
+
+drm_public int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
+ unsigned size, void *value)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)value;
+ request.return_size = size;
+ request.query = AMDGPU_INFO_SENSOR;
+ request.sensor_info.type = sensor_type;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+drm_public int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
+ unsigned size, void *value)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)value;
+ request.return_size = size;
+ request.query = AMDGPU_INFO_VIDEO_CAPS;
+ request.sensor_info.type = cap_type;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
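
Editorial usage sketch, not part of the upstream diff: querying a few of the info interfaces above on an already initialized device handle. The field names used here are the ones filled in by amdgpu_query_gpu_info_init() and the kernel uapi structures referenced above.

#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static void dump_gpu_info(amdgpu_device_handle dev)
{
	struct amdgpu_gpu_info gpu = {0};
	struct amdgpu_heap_info vram = {0};
	struct drm_amdgpu_info_hw_ip gfx = {0};

	if (!amdgpu_query_gpu_info(dev, &gpu))
		printf("asic 0x%x, family %u, %u SEs, VRAM bus %u bits\n",
		       gpu.asic_id, gpu.family_id,
		       gpu.num_shader_engines, gpu.vram_bit_width);

	if (!amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram))
		printf("VRAM: %llu bytes total, %llu in use\n",
		       (unsigned long long)vram.heap_size,
		       (unsigned long long)vram.heap_usage);

	if (!amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &gfx))
		printf("GFX IP %u.%u\n", gfx.hw_ip_version_major,
		       gfx.hw_ip_version_minor);
}
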
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
new file mode 100644
index 0000000..37a7c9d
--- /dev/null
+++ b/amdgpu/amdgpu_internal.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright © 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AMDGPU_INTERNAL_H_
+#define _AMDGPU_INTERNAL_H_
+
+#include <assert.h>
+#include <pthread.h>
+
+#include "libdrm_macros.h"
+#include "xf86atomic.h"
+#include "amdgpu.h"
+#include "util_double_list.h"
+#include "handle_table.h"
+
+#define AMDGPU_CS_MAX_RINGS 8
+/* Do not use the macros below unless y is a power of two. */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))
+
+#define AMDGPU_INVALID_VA_ADDRESS 0xffffffffffffffff
+#define AMDGPU_NULL_SUBMIT_SEQ 0
+
+struct amdgpu_bo_va_hole {
+ struct list_head list;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct amdgpu_bo_va_mgr {
+ uint64_t va_max;
+ struct list_head va_holes;
+ pthread_mutex_t bo_va_mutex;
+ uint32_t va_alignment;
+};
+
+struct amdgpu_va {
+ amdgpu_device_handle dev;
+ uint64_t address;
+ uint64_t size;
+ enum amdgpu_gpu_va_range range;
+ struct amdgpu_bo_va_mgr *vamgr;
+};
+
+struct amdgpu_device {
+ atomic_t refcount;
+ struct amdgpu_device *next;
+ int fd;
+ int flink_fd;
+ unsigned major_version;
+ unsigned minor_version;
+
+ char *marketing_name;
+ /** List of buffer handles. Protected by bo_table_mutex. */
+ struct handle_table bo_handles;
+ /** List of buffer GEM flink names. Protected by bo_table_mutex. */
+ struct handle_table bo_flink_names;
+ /** This protects all hash tables. */
+ pthread_mutex_t bo_table_mutex;
+ struct drm_amdgpu_info_device dev_info;
+ struct amdgpu_gpu_info info;
+ /** The VA manager for the lower virtual address space */
+ struct amdgpu_bo_va_mgr vamgr;
+ /** The VA manager for the 32bit address space */
+ struct amdgpu_bo_va_mgr vamgr_32;
+ /** The VA manager for the high virtual address space */
+ struct amdgpu_bo_va_mgr vamgr_high;
+ /** The VA manager for the 32bit high address space */
+ struct amdgpu_bo_va_mgr vamgr_high_32;
+};
+
+struct amdgpu_bo {
+ atomic_t refcount;
+ struct amdgpu_device *dev;
+
+ uint64_t alloc_size;
+
+ uint32_t handle;
+ uint32_t flink_name;
+
+ pthread_mutex_t cpu_access_mutex;
+ void *cpu_ptr;
+ int64_t cpu_map_count;
+};
+
+struct amdgpu_bo_list {
+ struct amdgpu_device *dev;
+
+ uint32_t handle;
+};
+
+struct amdgpu_context {
+ struct amdgpu_device *dev;
+ /** Mutex for accessing fences and for keeping command submissions
+ in order. */
+ pthread_mutex_t sequence_mutex;
+ /* context id */
+ uint32_t id;
+ uint64_t last_seq[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
+ struct list_head sem_list[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
+};
+
+/**
+ * Structure describing a software semaphore based on the GPU scheduler.
+ */
+struct amdgpu_semaphore {
+ atomic_t refcount;
+ struct list_head list;
+ struct amdgpu_cs_fence signal_fence;
+};
+
+/**
+ * Functions.
+ */
+
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+ uint64_t max, uint64_t alignment);
+
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
+
+drm_private void amdgpu_parse_asic_ids(struct amdgpu_device *dev);
+
+drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);
+
+drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);
+
+/**
+ * Inline functions.
+ */
+
+/**
+ * Increment src and decrement dst as if we were updating references
+ * for an assignment between two object pointers.
+ *
+ * \return true if dst is 0
+ */
+static inline bool update_references(atomic_t *dst, atomic_t *src)
+{
+ if (dst != src) {
+ /* bump src first */
+ if (src) {
+ assert(atomic_read(src) > 0);
+ atomic_inc(src);
+ }
+ if (dst) {
+ assert(atomic_read(dst) > 0);
+ return atomic_dec_and_test(dst);
+ }
+ }
+ return false;
+}
+
+#endif
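
Editorial worked example, not part of the upstream header: the rounding macros evaluated with y = 8 (a power of two, as the comment above requires). The macro definitions are copied here only so the snippet is self-contained.

#include <assert.h>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))

int main(void)
{
	assert(ROUND_UP(13, 8) == 16);   /* ((13-1) | 7) + 1 */
	assert(ROUND_DOWN(13, 8) == 8);  /* 13 & ~7 */
	assert(ROUND_UP(16, 8) == 16);   /* already aligned values are unchanged */
	return 0;
}
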
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
new file mode 100644
index 0000000..077a9fc
--- /dev/null
+++ b/amdgpu/amdgpu_vamgr.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include "amdgpu.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "util_math.h"
+
+drm_public int amdgpu_va_range_query(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range type,
+ uint64_t *start, uint64_t *end)
+{
+ if (type != amdgpu_gpu_va_range_general)
+ return -EINVAL;
+
+ *start = dev->dev_info.virtual_address_offset;
+ *end = dev->dev_info.virtual_address_max;
+ return 0;
+}
+
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+ uint64_t max, uint64_t alignment)
+{
+ struct amdgpu_bo_va_hole *n;
+
+ mgr->va_max = max;
+ mgr->va_alignment = alignment;
+
+ list_inithead(&mgr->va_holes);
+ pthread_mutex_init(&mgr->bo_va_mutex, NULL);
+ pthread_mutex_lock(&mgr->bo_va_mutex);
+ n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ n->size = mgr->va_max - start;
+ n->offset = start;
+ list_add(&n->list, &mgr->va_holes);
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+}
+
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
+{
+ struct amdgpu_bo_va_hole *hole, *tmp;
+ LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
+ list_del(&hole->list);
+ free(hole);
+ }
+ pthread_mutex_destroy(&mgr->bo_va_mutex);
+}
+
+static drm_private int
+amdgpu_vamgr_subtract_hole(struct amdgpu_bo_va_hole *hole, uint64_t start_va,
+ uint64_t end_va)
+{
+ if (start_va > hole->offset && end_va - hole->offset < hole->size) {
+ struct amdgpu_bo_va_hole *n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ if (!n)
+ return -ENOMEM;
+
+ n->size = start_va - hole->offset;
+ n->offset = hole->offset;
+ list_add(&n->list, &hole->list);
+
+ hole->size -= (end_va - hole->offset);
+ hole->offset = end_va;
+ } else if (start_va > hole->offset) {
+ hole->size = start_va - hole->offset;
+ } else if (end_va - hole->offset < hole->size) {
+ hole->size -= (end_va - hole->offset);
+ hole->offset = end_va;
+ } else {
+ list_del(&hole->list);
+ free(hole);
+ }
+
+ return 0;
+}
+
+static drm_private int
+amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+ uint64_t alignment, uint64_t base_required,
+ bool search_from_top, uint64_t *va_out)
+{
+ struct amdgpu_bo_va_hole *hole, *n;
+ uint64_t offset = 0;
+ int ret;
+
+
+ alignment = MAX2(alignment, mgr->va_alignment);
+ size = ALIGN(size, mgr->va_alignment);
+
+ if (base_required % alignment)
+ return -EINVAL;
+
+ pthread_mutex_lock(&mgr->bo_va_mutex);
+ if (!search_from_top) {
+ LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
+ if (base_required) {
+ if (hole->offset > base_required ||
+ (hole->offset + hole->size) < (base_required + size))
+ continue;
+ offset = base_required;
+ } else {
+ uint64_t waste = hole->offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ offset = hole->offset + waste;
+ if (offset >= (hole->offset + hole->size) ||
+ size > (hole->offset + hole->size) - offset) {
+ continue;
+ }
+ }
+ ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ *va_out = offset;
+ return ret;
+ }
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+ if (base_required) {
+ if (hole->offset > base_required ||
+ (hole->offset + hole->size) < (base_required + size))
+ continue;
+ offset = base_required;
+ } else {
+ if (size > hole->size)
+ continue;
+
+ offset = hole->offset + hole->size - size;
+ offset -= offset % alignment;
+ if (offset < hole->offset) {
+ continue;
+ }
+ }
+
+ ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ *va_out = offset;
+ return ret;
+ }
+ }
+
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return -ENOMEM;
+}
+
+static drm_private void
+amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
+{
+ struct amdgpu_bo_va_hole *hole, *next;
+
+ if (va == AMDGPU_INVALID_VA_ADDRESS)
+ return;
+
+ size = ALIGN(size, mgr->va_alignment);
+
+ pthread_mutex_lock(&mgr->bo_va_mutex);
+ hole = container_of(&mgr->va_holes, hole, list);
+ LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
+ if (next->offset < va)
+ break;
+ hole = next;
+ }
+
+ if (&hole->list != &mgr->va_holes) {
+ /* Grow upper hole if it's adjacent */
+ if (hole->offset == (va + size)) {
+ hole->offset = va;
+ hole->size += size;
+ /* Merge lower hole if it's adjacent */
+ if (next != hole &&
+ &next->list != &mgr->va_holes &&
+ (next->offset + next->size) == va) {
+ next->size += hole->size;
+ list_del(&hole->list);
+ free(hole);
+ }
+ goto out;
+ }
+ }
+
+ /* Grow lower hole if it's adjacent */
+ if (next != hole && &next->list != &mgr->va_holes &&
+ (next->offset + next->size) == va) {
+ next->size += size;
+ goto out;
+ }
+
+ /* FIXME: on allocation failure we just lose virtual address space;
+ * maybe print a warning.
+ */
+ next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ if (next) {
+ next->size = size;
+ next->offset = va;
+ list_add(&next->list, &hole->list);
+ }
+
+out:
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+}
+
+drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range va_range_type,
+ uint64_t size,
+ uint64_t va_base_alignment,
+ uint64_t va_base_required,
+ uint64_t *va_base_allocated,
+ amdgpu_va_handle *va_range_handle,
+ uint64_t flags)
+{
+ struct amdgpu_bo_va_mgr *vamgr;
+ bool search_from_top = !!(flags & AMDGPU_VA_RANGE_REPLAYABLE);
+ int ret;
+
+ /* Clear the flag when the high VA manager is not initialized */
+ if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
+ flags &= ~AMDGPU_VA_RANGE_HIGH;
+
+ if (flags & AMDGPU_VA_RANGE_HIGH) {
+ if (flags & AMDGPU_VA_RANGE_32_BIT)
+ vamgr = &dev->vamgr_high_32;
+ else
+ vamgr = &dev->vamgr_high;
+ } else {
+ if (flags & AMDGPU_VA_RANGE_32_BIT)
+ vamgr = &dev->vamgr_32;
+ else
+ vamgr = &dev->vamgr;
+ }
+
+ va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
+ size = ALIGN(size, vamgr->va_alignment);
+
+ ret = amdgpu_vamgr_find_va(vamgr, size,
+ va_base_alignment, va_base_required,
+ search_from_top, va_base_allocated);
+
+ if (!(flags & AMDGPU_VA_RANGE_32_BIT) && ret) {
+ /* fallback to 32bit address */
+ if (flags & AMDGPU_VA_RANGE_HIGH)
+ vamgr = &dev->vamgr_high_32;
+ else
+ vamgr = &dev->vamgr_32;
+ ret = amdgpu_vamgr_find_va(vamgr, size,
+ va_base_alignment, va_base_required,
+ search_from_top, va_base_allocated);
+ }
+
+ if (!ret) {
+ struct amdgpu_va *va;
+ va = calloc(1, sizeof(struct amdgpu_va));
+ if (!va) {
+ amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
+ return -ENOMEM;
+ }
+ va->dev = dev;
+ va->address = *va_base_allocated;
+ va->size = size;
+ va->range = va_range_type;
+ va->vamgr = vamgr;
+ *va_range_handle = va;
+ }
+
+ return ret;
+}
+
+drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
+{
+ if (!va_range_handle || !va_range_handle->address)
+ return 0;
+
+ amdgpu_vamgr_free_va(va_range_handle->vamgr,
+ va_range_handle->address,
+ va_range_handle->size);
+ free(va_range_handle);
+ return 0;
+}
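
Editorial usage sketch, not part of the upstream diff: reserving and releasing 1 MiB of GPU virtual address space on an initialized device handle. The alignment, required base and flags passed here are just illustrative defaults.

#include <stdint.h>
#include <stdio.h>
#include <amdgpu.h>

static int reserve_va_example(amdgpu_device_handle dev)
{
	amdgpu_va_handle va_handle;
	uint64_t va_base;
	int r;

	r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
				  1024 * 1024,	/* size */
				  0,		/* no extra alignment */
				  0,		/* no required base */
				  &va_base, &va_handle, 0 /* flags */);
	if (r)
		return r;

	printf("allocated VA range at 0x%llx\n", (unsigned long long)va_base);

	/* Typically the range would now be mapped with amdgpu_bo_va_op(). */
	return amdgpu_va_range_free(va_handle);
}
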
diff --git a/amdgpu/amdgpu_vm.c b/amdgpu/amdgpu_vm.c
new file mode 100644
index 0000000..7e6e28f
--- /dev/null
+++ b/amdgpu/amdgpu_vm.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_drm.h"
+#include "xf86drm.h"
+#include "amdgpu_internal.h"
+
+drm_public int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags)
+{
+ union drm_amdgpu_vm vm;
+
+ vm.in.op = AMDGPU_VM_OP_RESERVE_VMID;
+ vm.in.flags = flags;
+
+ return drmCommandWriteRead(dev->fd, DRM_AMDGPU_VM,
+ &vm, sizeof(vm));
+}
+
+drm_public int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev,
+ uint32_t flags)
+{
+ union drm_amdgpu_vm vm;
+
+ vm.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
+ vm.in.flags = flags;
+
+ return drmCommandWriteRead(dev->fd, DRM_AMDGPU_VM,
+ &vm, sizeof(vm));
+}
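
Editorial usage sketch, not part of the upstream diff: reserving a VMID for the calling process and releasing it again. Whether the ioctl succeeds depends on kernel support and privileges.

#include <amdgpu.h>

static int vmid_example(amdgpu_device_handle dev)
{
	int r = amdgpu_vm_reserve_vmid(dev, 0);
	if (r)
		return r;

	/* ... submit work that relies on the reserved VMID ... */

	return amdgpu_vm_unreserve_vmid(dev, 0);
}
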
diff --git a/amdgpu/handle_table.c b/amdgpu/handle_table.c
new file mode 100644
index 0000000..4fdd29d
--- /dev/null
+++ b/amdgpu/handle_table.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include "handle_table.h"
+#include "util_math.h"
+
+drm_private int handle_table_insert(struct handle_table *table, uint32_t key,
+ void *value)
+{
+ if (key >= table->max_key) {
+ uint32_t alignment = sysconf(_SC_PAGESIZE) / sizeof(void*);
+ uint32_t max_key = ALIGN(key + 1, alignment);
+ void **values;
+
+ values = realloc(table->values, max_key * sizeof(void *));
+ if (!values)
+ return -ENOMEM;
+
+ memset(values + table->max_key, 0, (max_key - table->max_key) *
+ sizeof(void *));
+
+ table->max_key = max_key;
+ table->values = values;
+ }
+ table->values[key] = value;
+ return 0;
+}
+
+drm_private void handle_table_remove(struct handle_table *table, uint32_t key)
+{
+ if (key < table->max_key)
+ table->values[key] = NULL;
+}
+
+drm_private void *handle_table_lookup(struct handle_table *table, uint32_t key)
+{
+ if (key < table->max_key)
+ return table->values[key];
+ else
+ return NULL;
+}
+
+drm_private void handle_table_fini(struct handle_table *table)
+{
+ free(table->values);
+ table->max_key = 0;
+ table->values = NULL;
+}
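
Editorial usage sketch, not part of the upstream diff: the handle table is drm_private, so this only illustrates its internal semantics and would only link from inside the library itself.

#include <assert.h>
#include <stddef.h>
#include "handle_table.h"

static void handle_table_example(void)
{
	struct handle_table table = {0};
	int value = 42;

	assert(handle_table_insert(&table, 5, &value) == 0);
	assert(handle_table_lookup(&table, 5) == (void *)&value);
	assert(handle_table_lookup(&table, 6) == NULL);	/* unused keys read as NULL */

	handle_table_remove(&table, 5);
	assert(handle_table_lookup(&table, 5) == NULL);

	handle_table_fini(&table);	/* frees the backing array */
}
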
diff --git a/amdgpu/handle_table.h b/amdgpu/handle_table.h
new file mode 100644
index 0000000..461193f
--- /dev/null
+++ b/amdgpu/handle_table.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _HANDLE_TABLE_H_
+#define _HANDLE_TABLE_H_
+
+#include <stdint.h>
+#include "libdrm_macros.h"
+
+struct handle_table {
+ uint32_t max_key;
+ void **values;
+};
+
+drm_private int handle_table_insert(struct handle_table *table, uint32_t key,
+ void *value);
+drm_private void handle_table_remove(struct handle_table *table, uint32_t key);
+drm_private void *handle_table_lookup(struct handle_table *table, uint32_t key);
+drm_private void handle_table_fini(struct handle_table *table);
+
+#endif /* _HANDLE_TABLE_H_ */
diff --git a/amdgpu/libdrm_amdgpu.pc.in b/amdgpu/libdrm_amdgpu.pc.in
new file mode 100644
index 0000000..f1c552a
--- /dev/null
+++ b/amdgpu/libdrm_amdgpu.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_amdgpu
+Description: Userspace interface to kernel DRM services for amdgpu
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm_amdgpu
+Cflags: -I${includedir} -I${includedir}/libdrm
+Requires.private: libdrm
diff --git a/amdgpu/meson.build b/amdgpu/meson.build
new file mode 100644
index 0000000..a1cca5a
--- /dev/null
+++ b/amdgpu/meson.build
@@ -0,0 +1,70 @@
+# Copyright © 2017-2018 Intel Corporation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
+datadir_amdgpu = join_paths(get_option('prefix'), get_option('datadir'), 'libdrm')
+
+libdrm_amdgpu = library(
+ 'drm_amdgpu',
+ [
+ files(
+ 'amdgpu_asic_id.c', 'amdgpu_bo.c', 'amdgpu_cs.c', 'amdgpu_device.c',
+ 'amdgpu_gpu_info.c', 'amdgpu_vamgr.c', 'amdgpu_vm.c', 'handle_table.c',
+ ),
+ config_file,
+ ],
+ c_args : [
+ libdrm_c_args,
+ '-DAMDGPU_ASIC_ID_TABLE="@0@"'.format(join_paths(datadir_amdgpu, 'amdgpu.ids')),
+ ],
+ include_directories : [inc_root, inc_drm],
+ link_with : libdrm,
+ dependencies : [dep_pthread_stubs, dep_atomic_ops, dep_rt],
+ version : '1.0.0',
+ install : true,
+)
+
+install_headers('amdgpu.h', subdir : 'libdrm')
+
+pkg.generate(
+ libdrm_amdgpu,
+ name : 'libdrm_amdgpu',
+ subdirs : ['.', 'libdrm'],
+ description : 'Userspace interface to kernel DRM services for amdgpu',
+)
+
+ext_libdrm_amdgpu = declare_dependency(
+ link_with : [libdrm, libdrm_amdgpu],
+ include_directories : [inc_drm, include_directories('.')],
+)
+
+if meson.version().version_compare('>= 0.54.0')
+ meson.override_dependency('libdrm_amdgpu', ext_libdrm_amdgpu)
+endif
+
+test(
+ 'amdgpu-symbols-check',
+ symbols_check,
+ args : [
+ '--lib', libdrm_amdgpu,
+ '--symbols-file', files('amdgpu-symbols.txt'),
+ '--nm', prog_nm.path(),
+ ],
+)
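
Editorial note, not part of the upstream diff: this file is consumed by the top-level libdrm meson build rather than built standalone, so from the libdrm source root something like "meson setup build && ninja -C build" produces libdrm_amdgpu, and "meson test -C build amdgpu-symbols-check" runs the symbols check defined above (assuming the amdgpu feature is enabled in the top-level build options).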