commit    dc50eab76b709d68175a358d6e23a5a3890764d3
tree      c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/gpu/drm/amd/amdkfd
parent    Adding debian version 6.6.15-2.
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000

    Merging upstream version 6.7.7.

    Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd'):

 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c               |  22
 drivers/gpu/drm/amd/amdkfd/kfd_crat.c                  |  97
 drivers/gpu/drm/amd/amdkfd/kfd_device.c                |  19
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  | 126
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c               |   8
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c           |  28
 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c     |   3
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h                  |  13
 drivers/gpu/drm/amd/amdkfd/kfd_process.c               |   7
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |   2
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c                   | 135
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h                   |   4
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c              |  88
 13 files changed, 357 insertions(+), 195 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index c37f1fcd21..f6d4748c19 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1021,7 +1021,7 @@ err_drm_file:
bool kfd_dev_is_large_bar(struct kfd_node *dev)
{
- if (debug_largebar) {
+ if (dev->kfd->adev->debug_largebar) {
pr_debug("Simulate large-bar allocation on non large-bar machine\n");
return true;
}
@@ -1432,17 +1432,21 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
goto sync_memory_failed;
}
}
- mutex_unlock(&p->mutex);
- if (flush_tlb) {
- /* Flush TLBs after waiting for the page table updates to complete */
- for (i = 0; i < args->n_devices; i++) {
- peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
+ /* Flush TLBs after waiting for the page table updates to complete */
+ for (i = 0; i < args->n_devices; i++) {
+ peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+ if (WARN_ON_ONCE(!peer_pdd))
+ continue;
+ if (flush_tlb)
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
- }
+
+ /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
+ amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
}
+
+ mutex_unlock(&p->mutex);
+
kfree(devices_arr);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index f76b7aee5c..cd8e459201 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1404,6 +1404,66 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
return i;
}
+static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+ struct kfd_gpu_cache_info *pcache_info)
+{
+ struct amdgpu_device *adev = kdev->adev;
+ int i = 0;
+
+ /* TCP L1 Cache per CU */
+ if (adev->gfx.config.gc_tcp_size_per_cu) {
+ pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
+ pcache_info[i].cache_level = 1;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = 1;
+ i++;
+ }
+ /* Scalar L1 Instruction Cache per SQC */
+ if (adev->gfx.config.gc_l1_instruction_cache_size_per_sqc) {
+ pcache_info[i].cache_size =
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
+ pcache_info[i].cache_level = 1;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_INST_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc;
+ i++;
+ }
+ /* Scalar L1 Data Cache per SQC */
+ if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
+ pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
+ pcache_info[i].cache_level = 1;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc;
+ i++;
+ }
+ /* L2 Data Cache per GPU (Total Tex Cache) */
+ if (adev->gfx.config.gc_tcc_size) {
+ pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
+ pcache_info[i].cache_level = 2;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+ i++;
+ }
+ /* L3 Data Cache per GPU */
+ if (adev->gmc.mall_size) {
+ pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
+ pcache_info[i].cache_level = 3;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+ i++;
+ }
+ return i;
+}
+
int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{
int num_of_cache_types = 0;
@@ -1461,10 +1521,14 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
break;
case IP_VERSION(9, 4, 2):
- case IP_VERSION(9, 4, 3):
*pcache_info = aldebaran_cache_info;
num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
break;
+ case IP_VERSION(9, 4, 3):
+ num_of_cache_types =
+ kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,
+ *pcache_info);
+ break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
*pcache_info = raven_cache_info;
@@ -1522,6 +1586,7 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
break;
@@ -2037,11 +2102,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
uint32_t proximity_domain)
{
struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct amdgpu_gfx_config *gfx_info = &kdev->adev->gfx.config;
+ struct amdgpu_cu_info *cu_info = &kdev->adev->gfx.cu_info;
struct crat_subtype_generic *sub_type_hdr;
struct kfd_local_mem_info local_mem_info;
struct kfd_topology_device *peer_dev;
struct crat_subtype_computeunit *cu;
- struct kfd_cu_info cu_info;
int avail_size = *size;
uint32_t total_num_of_cu;
uint32_t nid = 0;
@@ -2085,21 +2151,20 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
cu->proximity_domain = proximity_domain;
- amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
- cu->num_simd_per_cu = cu_info.simd_per_cu;
- cu->num_simd_cores = cu_info.simd_per_cu *
- (cu_info.cu_active_number / kdev->kfd->num_nodes);
- cu->max_waves_simd = cu_info.max_waves_per_simd;
+ cu->num_simd_per_cu = cu_info->simd_per_cu;
+ cu->num_simd_cores = cu_info->simd_per_cu *
+ (cu_info->number / kdev->kfd->num_nodes);
+ cu->max_waves_simd = cu_info->max_waves_per_simd;
- cu->wave_front_size = cu_info.wave_front_size;
- cu->array_count = cu_info.num_shader_arrays_per_engine *
- cu_info.num_shader_engines;
- total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
+ cu->wave_front_size = cu_info->wave_front_size;
+ cu->array_count = gfx_info->max_sh_per_se *
+ gfx_info->max_shader_engines;
+ total_num_of_cu = (cu->array_count * gfx_info->max_cu_per_sh);
cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
- cu->num_cu_per_array = cu_info.num_cu_per_sh;
- cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
- cu->num_banks = cu_info.num_shader_engines;
- cu->lds_size_in_kb = cu_info.lds_size;
+ cu->num_cu_per_array = gfx_info->max_cu_per_sh;
+ cu->max_slots_scatch_cu = cu_info->max_scratch_slots_per_cu;
+ cu->num_banks = gfx_info->max_shader_engines;
+ cu->lds_size_in_kb = cu_info->lds_size;
cu->hsa_capability = 0;
@@ -2115,7 +2180,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
- if (debug_largebar)
+ if (kdev->adev->debug_largebar)
local_mem_info.local_mem_size_private = 0;
if (local_mem_info.local_mem_size_private == 0)
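
Note: for GC 9.4.3 the cache table is now derived at runtime from
IP-discovery values (gc_tcp_size_per_cu, gc_tcc_size, mall_size, ...)
instead of the static aldebaran table. A sketch of how a caller consumes the
result, following the scratch-buffer pattern kfd_topology.c uses further
down in this patch:

	struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
	struct kfd_gpu_cache_info *pcache_info = cache_info;
	int n, i;

	/* Table-driven ASICs redirect pcache_info to a static table;
	 * config-driven ones fill the caller's scratch buffer in place.
	 */
	n = kfd_get_gpu_cache_info(kdev, &pcache_info);
	for (i = 0; i < n; i++)
		pr_debug("L%u cache: %u KiB shared by %u CUs\n",
			 pcache_info[i].cache_level,
			 pcache_info[i].cache_size,
			 pcache_info[i].num_cu_shared);
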
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 93ce181eb3..0a9cf9dfc2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -65,7 +65,7 @@ static int kfd_resume(struct kfd_node *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
- uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
+ uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);
switch (sdma_version) {
case IP_VERSION(4, 0, 0):/* VEGA10 */
@@ -95,6 +95,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
+ case IP_VERSION(6, 1, 0):
kfd->device_info.num_sdma_queues_per_engine = 8;
break;
default:
@@ -111,6 +112,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
+ case IP_VERSION(6, 1, 0):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
@@ -162,6 +164,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
break;
default:
@@ -279,7 +282,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v8_kfd2kgd;
break;
default:
- switch (adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
/* Vega 10 */
case IP_VERSION(9, 0, 1):
gfx_target_version = 90000;
@@ -413,6 +416,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
+ case IP_VERSION(11, 5, 0):
+ gfx_target_version = 110500;
+ f2g = &gfx_v11_kfd2kgd;
+ break;
default:
break;
}
@@ -420,9 +427,11 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
}
if (!f2g) {
- if (adev->ip_versions[GC_HWIP][0])
- dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
- adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
+ if (amdgpu_ip_version(adev, GC_HWIP, 0))
+ dev_err(kfd_device,
+ "GC IP %06x %s not supported in kfd\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0),
+ vf ? "VF" : "");
else
dev_err(kfd_device, "%s %s not supported in kfd\n",
amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
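
Note: throughout this series the raw adev->ip_versions[HWIP][inst] reads are
replaced by the amdgpu_ip_version() accessor. Assuming it is a thin wrapper
around the same array lookup (the in-tree helper may additionally mask
sub-revision bits), its shape is roughly:

	static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
						 uint8_t ip, uint8_t inst)
	{
		return adev->ip_versions[ip][inst];
	}
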
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e07652e724..c0e7154338 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -92,7 +92,7 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
- KGD_MAX_QUEUES);
+ AMDGPU_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
@@ -227,13 +227,15 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
queue_input.tba_addr = qpd->tba_addr;
queue_input.tma_addr = qpd->tma_addr;
queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
- queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled ||
- kfd_dbg_has_ttmps_always_setup(q->device);
+ queue_input.skip_process_ctx_clear =
+ qpd->pqm->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED &&
+ (qpd->pqm->process->debug_trap_enabled ||
+ kfd_dbg_has_ttmps_always_setup(q->device));
queue_type = convert_to_mes_queue_type(q->properties.type);
if (queue_type < 0) {
- pr_err("Queue type not supported with MES, queue:%d\n",
- q->properties.type);
+ dev_err(adev->dev, "Queue type not supported with MES, queue:%d\n",
+ q->properties.type);
return -EINVAL;
}
queue_input.queue_type = (uint32_t)queue_type;
@@ -244,9 +246,9 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r) {
- pr_err("failed to add hardware queue to MES, doorbell=0x%x\n",
+ dev_err(adev->dev, "failed to add hardware queue to MES, doorbell=0x%x\n",
q->properties.doorbell_off);
- pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
+ dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
kfd_hws_hang(dqm);
}
@@ -272,9 +274,9 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
amdgpu_mes_unlock(&adev->mes);
if (r) {
- pr_err("failed to remove hardware queue from MES, doorbell=0x%x\n",
+ dev_err(adev->dev, "failed to remove hardware queue from MES, doorbell=0x%x\n",
q->properties.doorbell_off);
- pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
+ dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
kfd_hws_hang(dqm);
}
@@ -284,6 +286,7 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
static int remove_all_queues_mes(struct device_queue_manager *dqm)
{
struct device_process_node *cur;
+ struct device *dev = dqm->dev->adev->dev;
struct qcm_process_device *qpd;
struct queue *q;
int retval = 0;
@@ -294,7 +297,7 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm)
if (q->properties.is_active) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval) {
- pr_err("%s: Failed to remove queue %d for dev %d",
+ dev_err(dev, "%s: Failed to remove queue %d for dev %d",
__func__,
q->properties.queue_id,
dqm->dev->id);
@@ -444,6 +447,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
+ struct device *dev = dqm->dev->adev->dev;
int allocated_vmid = -1, i;
for (i = dqm->dev->vm_info.first_vmid_kfd;
@@ -455,7 +459,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
}
if (allocated_vmid < 0) {
- pr_err("no more vmid to allocate\n");
+ dev_err(dev, "no more vmid to allocate\n");
return -ENOSPC;
}
@@ -511,10 +515,12 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
+ struct device *dev = dqm->dev->adev->dev;
+
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->adev->asic_type == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
- pr_err("Failed to flush TC\n");
+ dev_err(dev, "Failed to flush TC\n");
kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
@@ -709,7 +715,7 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process
pr_debug("Killing all process wavefronts\n");
if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
- pr_err("no vmid pasid mapping supported \n");
+ dev_err(dev->adev->dev, "no vmid pasid mapping supported\n");
return -EOPNOTSUPP;
}
@@ -730,7 +736,7 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process
}
if (vmid > last_vmid_to_scan) {
- pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
+ dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n", p->pasid);
return -EFAULT;
}
@@ -822,6 +828,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
{
int retval;
uint64_t sdma_val = 0;
+ struct device *dev = dqm->dev->adev->dev;
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct mqd_manager *mqd_mgr =
dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
@@ -832,7 +839,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
&sdma_val);
if (retval)
- pr_err("Failed to read SDMA queue counter for queue: %d\n",
+ dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
q->properties.queue_id);
}
@@ -851,6 +858,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
struct mqd_update_info *minfo)
{
int retval = 0;
+ struct device *dev = dqm->dev->adev->dev;
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
bool prev_active = false;
@@ -876,7 +884,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
retval = remove_queue_mes(dqm, q, &pdd->qpd);
if (retval) {
- pr_err("unmap queue failed\n");
+ dev_err(dev, "unmap queue failed\n");
goto out_unlock;
}
} else if (prev_active &&
@@ -895,7 +903,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval) {
- pr_err("destroy mqd failed\n");
+ dev_err(dev, "destroy mqd failed\n");
goto out_unlock;
}
}
@@ -1089,6 +1097,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
+ struct device *dev = dqm->dev->adev->dev;
struct kfd_process_device *pdd;
int retval = 0;
@@ -1122,7 +1131,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval) {
- pr_err("Failed to evict queue %d\n",
+ dev_err(dev, "Failed to evict queue %d\n",
q->properties.queue_id);
goto out;
}
@@ -1226,6 +1235,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
+ struct device *dev = dqm->dev->adev->dev;
struct kfd_process_device *pdd;
uint64_t eviction_duration;
int retval = 0;
@@ -1266,7 +1276,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = add_queue_mes(dqm, q, qpd);
if (retval) {
- pr_err("Failed to restore queue %d\n",
+ dev_err(dev, "Failed to restore queue %d\n",
q->properties.queue_id);
goto out;
}
@@ -1475,18 +1485,19 @@ static void pre_reset(struct device_queue_manager *dqm)
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q, const uint32_t *restore_sdma_id)
{
+ struct device *dev = dqm->dev->adev->dev;
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- pr_err("No more SDMA queue to allocate\n");
+ dev_err(dev, "No more SDMA queue to allocate\n");
return -ENOMEM;
}
if (restore_sdma_id) {
/* Re-use existing sdma_id */
if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
- pr_err("SDMA queue already in use\n");
+ dev_err(dev, "SDMA queue already in use\n");
return -EBUSY;
}
clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
@@ -1505,13 +1516,13 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- pr_err("No more XGMI SDMA queue to allocate\n");
+ dev_err(dev, "No more XGMI SDMA queue to allocate\n");
return -ENOMEM;
}
if (restore_sdma_id) {
/* Re-use existing sdma_id */
if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
- pr_err("SDMA queue already in use\n");
+ dev_err(dev, "SDMA queue already in use\n");
return -EBUSY;
}
clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
@@ -1563,11 +1574,12 @@ static int set_sched_resources(struct device_queue_manager *dqm)
{
int i, mec;
struct scheduling_resources res;
+ struct device *dev = dqm->dev->adev->dev;
res.vmid_mask = dqm->dev->compute_vmid_bitmap;
res.queue_mask = 0;
- for (i = 0; i < KGD_MAX_QUEUES; ++i) {
+ for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) {
mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
/ dqm->dev->kfd->shared_resources.num_pipe_per_mec;
@@ -1583,7 +1595,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
* definition of res.queue_mask needs updating
*/
if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
- pr_err("Invalid queue enabled by amdgpu: %d\n", i);
+ dev_err(dev, "Invalid queue enabled by amdgpu: %d\n", i);
break;
}
@@ -1626,6 +1638,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
static int start_cpsch(struct device_queue_manager *dqm)
{
+ struct device *dev = dqm->dev->adev->dev;
int retval;
retval = 0;
@@ -1672,7 +1685,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
retval = pm_update_grace_period(&dqm->packet_mgr,
grace_period);
if (retval)
- pr_err("Setting grace timeout failed\n");
+ dev_err(dev, "Setting grace timeout failed\n");
else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
/* Update dqm->wait_times maintained in software */
dqm->dev->kfd2kgd->build_grace_period_packet_info(
@@ -1881,15 +1894,17 @@ out:
return retval;
}
-int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
- uint64_t fence_value,
- unsigned int timeout_ms)
+int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
+ uint64_t fence_value,
+ unsigned int timeout_ms)
{
unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
+ struct device *dev = dqm->dev->adev->dev;
+ uint64_t *fence_addr = dqm->fence_addr;
while (*fence_addr != fence_value) {
if (time_after(jiffies, end_jiffies)) {
- pr_err("qcm fence wait loop timeout expired\n");
+ dev_err(dev, "qcm fence wait loop timeout expired\n");
/* In HWS case, this is used to halt the driver thread
* in order not to mess up CP states before doing
* scandumps for FW debugging.
@@ -1908,6 +1923,7 @@ int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
+ struct device *dev = dqm->dev->adev->dev;
int retval;
if (!dqm->sched_running)
@@ -1920,7 +1936,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
pr_debug("%s sent runlist\n", __func__);
if (retval) {
- pr_err("failed to execute runlist\n");
+ dev_err(dev, "failed to execute runlist\n");
return retval;
}
dqm->active_runlist = true;
@@ -1935,8 +1951,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
uint32_t grace_period,
bool reset)
{
- int retval = 0;
+ struct device *dev = dqm->dev->adev->dev;
struct mqd_manager *mqd_mgr;
+ int retval = 0;
if (!dqm->sched_running)
return 0;
@@ -1959,10 +1976,10 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
- retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
- queue_preemption_timeout_ms);
+ retval = amdkfd_fence_wait_timeout(dqm, KFD_FENCE_COMPLETED,
+ queue_preemption_timeout_ms);
if (retval) {
- pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ dev_err(dev, "The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
kfd_hws_hang(dqm);
return retval;
}
@@ -1977,7 +1994,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
- pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
+ dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
return -ETIME;
@@ -1987,7 +2004,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
if (pm_update_grace_period(&dqm->packet_mgr,
USE_DEFAULT_GRACE_PERIOD))
- pr_err("Failed to reset grace period\n");
+ dev_err(dev, "Failed to reset grace period\n");
}
pm_release_ib(&dqm->packet_mgr);
@@ -2061,6 +2078,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
uint64_t sdma_val = 0;
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
+ struct device *dev = dqm->dev->adev->dev;
/* Get the SDMA queue stats */
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
@@ -2068,7 +2086,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
&sdma_val);
if (retval)
- pr_err("Failed to read SDMA queue counter for queue: %d\n",
+ dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
q->properties.queue_id);
}
@@ -2349,6 +2367,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
{
int retval;
struct queue *q;
+ struct device *dev = dqm->dev->adev->dev;
struct kernel_queue *kq, *kq_next;
struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
@@ -2382,7 +2401,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval)
- pr_err("Failed to remove queue %d\n",
+ dev_err(dev, "Failed to remove queue %d\n",
q->properties.queue_id);
}
}
@@ -2437,12 +2456,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
static int init_mqd_managers(struct device_queue_manager *dqm)
{
int i, j;
+ struct device *dev = dqm->dev->adev->dev;
struct mqd_manager *mqd_mgr;
for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
if (!mqd_mgr) {
- pr_err("mqd manager [%d] initialization failed\n", i);
+ dev_err(dev, "mqd manager [%d] initialization failed\n", i);
goto out_free;
}
dqm->mqd_mgrs[i] = mqd_mgr;
@@ -2552,7 +2572,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
dqm->ops.checkpoint_mqd = checkpoint_mqd;
break;
default:
- pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
+ dev_err(dev->adev->dev, "Invalid scheduling policy %d\n", dqm->sched_policy);
goto out_free;
}
@@ -2590,7 +2610,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
goto out_free;
if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
- pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
+ dev_err(dev->adev->dev, "Failed to allocate hiq sdma mqd trunk buffer\n");
goto out_free;
}
@@ -2649,17 +2669,18 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int r;
+ struct device *dev = dqm->dev->adev->dev;
int updated_vmid_mask;
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
- pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
return -EINVAL;
}
dqm_lock(dqm);
if (dqm->trap_debug_vmid != 0) {
- pr_err("Trap debug id already reserved\n");
+ dev_err(dev, "Trap debug id already reserved\n");
r = -EBUSY;
goto out_unlock;
}
@@ -2695,19 +2716,20 @@ out_unlock:
int release_debug_trap_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
+ struct device *dev = dqm->dev->adev->dev;
int r;
int updated_vmid_mask;
uint32_t trap_debug_vmid;
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
- pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
return -EINVAL;
}
dqm_lock(dqm);
trap_debug_vmid = dqm->trap_debug_vmid;
if (dqm->trap_debug_vmid == 0) {
- pr_err("Trap debug id is not reserved\n");
+ dev_err(dev, "Trap debug id is not reserved\n");
r = -EINVAL;
goto out_unlock;
}
@@ -2844,6 +2866,7 @@ int resume_queues(struct kfd_process *p,
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
struct device_queue_manager *dqm = pdd->dev->dqm;
+ struct device *dev = dqm->dev->adev->dev;
struct qcm_process_device *qpd = &pdd->qpd;
struct queue *q;
int r, per_device_resumed = 0;
@@ -2894,7 +2917,7 @@ int resume_queues(struct kfd_process *p,
0,
USE_DEFAULT_GRACE_PERIOD);
if (r) {
- pr_err("Failed to resume process queues\n");
+ dev_err(dev, "Failed to resume process queues\n");
if (queue_ids) {
list_for_each_entry(q, &qpd->queues_list, list) {
int q_idx = q_array_get_index(
@@ -2946,6 +2969,7 @@ int suspend_queues(struct kfd_process *p,
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
struct device_queue_manager *dqm = pdd->dev->dqm;
+ struct device *dev = dqm->dev->adev->dev;
struct qcm_process_device *qpd = &pdd->qpd;
struct queue *q;
int r, per_device_suspended = 0;
@@ -2994,7 +3018,7 @@ int suspend_queues(struct kfd_process *p,
grace_period);
if (r)
- pr_err("Failed to suspend process queues.\n");
+ dev_err(dev, "Failed to suspend process queues.\n");
else
total_suspended += per_device_suspended;
@@ -3081,10 +3105,11 @@ void set_queue_snapshot_entry(struct queue *q,
int debug_lock_and_unmap(struct device_queue_manager *dqm)
{
+ struct device *dev = dqm->dev->adev->dev;
int r;
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
- pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
return -EINVAL;
}
@@ -3102,10 +3127,11 @@ int debug_lock_and_unmap(struct device_queue_manager *dqm)
int debug_map_and_unlock(struct device_queue_manager *dqm)
{
+ struct device *dev = dqm->dev->adev->dev;
int r;
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
- pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
return -EINVAL;
}
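
Note: most of the churn in this file is a mechanical pr_err() -> dev_err()
conversion so that errors are prefixed with the originating GPU's device
name, which matters on multi-GPU systems. The pattern repeated in each
function:

	struct device *dev = dqm->dev->adev->dev;   /* device behind this DQM */

	dev_err(dev, "failed to execute runlist\n"); /* was: pr_err(...) */

The amdkfd_fence_wait_timeout() signature change serves the same purpose:
passing the dqm lets the function derive both the fence address and the
struct device to log against.
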
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 659313648b..b8680e0753 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -460,7 +460,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, trigger);
- svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+ svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
@@ -544,7 +544,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
if (cpages) {
prange->actual_loc = best_loc;
- svm_range_free_dma_mappings(prange, true);
+ svm_range_dma_unmap(prange);
} else {
svm_range_vram_node_free(prange);
}
@@ -745,7 +745,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger);
- svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+ svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
@@ -1001,7 +1001,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
void *r;
/* Page migration works on gfx9 or newer */
- if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL;
if (adev->gmc.is_app_apu)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 447829c222..050a6936ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -99,7 +99,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
uint32_t *se_mask, uint32_t inst)
{
- struct kfd_cu_info cu_info;
+ struct amdgpu_cu_info *cu_info = &mm->dev->adev->gfx.cu_info;
+ struct amdgpu_gfx_config *gfx_info = &mm->dev->adev->gfx.config;
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
@@ -108,9 +109,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;
- amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
-
- cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;
+ cu_active_per_node = cu_info->number / mm->dev->kfd->num_nodes;
if (cu_mask_count > cu_active_per_node)
cu_mask_count = cu_active_per_node;
@@ -118,13 +117,14 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
* Returning with no CU's enabled will hang the queue, which should be
* attention grabbing.
*/
- if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
- pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
+ if (gfx_info->max_shader_engines > KFD_MAX_NUM_SE) {
+ pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n",
+ gfx_info->max_shader_engines);
return;
}
- if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
+ if (gfx_info->max_sh_per_se > KFD_MAX_NUM_SH_PER_SE) {
pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
- cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
+ gfx_info->max_sh_per_se * gfx_info->max_shader_engines);
return;
}
@@ -142,10 +142,10 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
* See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
* See note on GFX11 cu_bitmap layout in gfx_v11_0_get_cu_info.
*/
- for (se = 0; se < cu_info.num_shader_engines; se++)
- for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
+ for (se = 0; se < gfx_info->max_shader_engines; se++)
+ for (sh = 0; sh < gfx_info->max_sh_per_se; sh++)
cu_per_sh[se][sh] = hweight32(
- cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) *
+ cu_info->bitmap[xcc_inst][se % 4][sh + (se / 4) *
cu_bitmap_sh_mul]);
/* Symmetrically map cu_mask to all SEs & SHs:
@@ -184,13 +184,13 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
*
* First ensure all CUs are disabled, then enable user specified CUs.
*/
- for (i = 0; i < cu_info.num_shader_engines; i++)
+ for (i = 0; i < gfx_info->max_shader_engines; i++)
se_mask[i] = 0;
i = inst;
for (cu = 0; cu < 16; cu += cu_inc) {
- for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
- for (se = 0; se < cu_info.num_shader_engines; se++) {
+ for (sh = 0; sh < gfx_info->max_sh_per_se; sh++) {
+ for (se = 0; se < gfx_info->max_shader_engines; se++) {
if (cu_per_sh[se][sh] > cu) {
if (cu_mask[i / 32] & (en_mask << (i % 32)))
se_mask[se] |= en_mask << (cu + sh * 16);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 1a03173e23..8ee2bedd30 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -205,7 +205,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
static inline bool pm_use_ext_eng(struct kfd_dev *dev)
{
- return dev->adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 2, 0);
+ return amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) >=
+ IP_VERSION(5, 2, 0);
}
static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 3287a39613..71445ab63b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -202,7 +202,7 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
+#define KFD_GC_VERSION(dev) (amdgpu_ip_version((dev)->adev, GC_HWIP, 0))
#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
@@ -1343,7 +1343,7 @@ int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
int *num_qss_entries,
uint32_t *entry_size);
-int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
uint64_t fence_value,
unsigned int timeout_ms);
@@ -1482,10 +1482,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
-static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
+static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
- struct drm_device *ddev = adev_to_drm(kfd->adev);
+ struct drm_device *ddev;
+
+ if (node->xcp)
+ ddev = node->xcp->ddev;
+ else
+ ddev = adev_to_drm(node->adev);
return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
ddev->render->index,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index fbf053001a..7a33e06f5c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1416,8 +1416,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
- if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
+ if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
+ if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
+ pr_debug("SRIOV platform xnack not supported\n");
+ return false;
+ }
continue;
+ }
/* GFXv10 and later GPUs do not support shader preemption
* during page faults. This can lead to poor QoS for queue
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 77f493262e..43eff221ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -87,6 +87,8 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
return;
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+ if (dev->kfd->shared_resources.enable_mes)
+ amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
pdd->already_dequeued = true;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 8e368e4659..9af1d09438 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -231,7 +231,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
return r;
}
-void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
+void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
@@ -249,7 +249,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
}
}
-void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
+void svm_range_dma_unmap(struct svm_range *prange)
{
struct kfd_process_device *pdd;
dma_addr_t *dma_addr;
@@ -270,10 +270,8 @@ void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
continue;
}
dev = &pdd->dev->adev->pdev->dev;
- if (unmap_dma)
- svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
- kvfree(dma_addr);
- prange->dma_addr[gpuidx] = NULL;
+
+ svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
}
}
@@ -281,18 +279,29 @@ static void svm_range_free(struct svm_range *prange, bool do_unmap)
{
uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+ uint32_t gpuidx;
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
prange->start, prange->last);
svm_range_vram_node_free(prange);
- svm_range_free_dma_mappings(prange, do_unmap);
+ if (do_unmap)
+ svm_range_dma_unmap(prange);
if (do_unmap && !p->xnack_enabled) {
pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
}
+
+ /* free dma_addr array for each gpu */
+ for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
+ if (prange->dma_addr[gpuidx]) {
+ kvfree(prange->dma_addr[gpuidx]);
+ prange->dma_addr[gpuidx] = NULL;
+ }
+ }
+
mutex_destroy(&prange->lock);
mutex_destroy(&prange->migrate_mutex);
kfree(prange);
@@ -391,14 +400,9 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
- if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
- /* We're not in the eviction worker.
- * Signal the fence and synchronize with any
- * pending eviction work.
- */
+ if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
+ /* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
- cancel_work_sync(&svm_bo->eviction_work);
- }
dma_fence_put(&svm_bo->eviction_fence->base);
amdgpu_bo_unref(&svm_bo->bo);
kfree(svm_bo);
@@ -1085,26 +1089,32 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
}
static int
-svm_range_split_tail(struct svm_range *prange,
- uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+ struct list_head *insert_list, struct list_head *remap_list)
{
struct svm_range *tail;
int r = svm_range_split(prange, prange->start, new_last, &tail);
- if (!r)
+ if (!r) {
list_add(&tail->list, insert_list);
+ if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
+ list_add(&tail->update_list, remap_list);
+ }
return r;
}
static int
-svm_range_split_head(struct svm_range *prange,
- uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+ struct list_head *insert_list, struct list_head *remap_list)
{
struct svm_range *head;
int r = svm_range_split(prange, new_start, prange->last, &head);
- if (!r)
+ if (!r) {
list_add(&head->list, insert_list);
+ if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
+ list_add(&head->update_list, remap_list);
+ }
return r;
}
@@ -1196,14 +1206,15 @@ svm_range_get_pte_flags(struct kfd_node *node,
uint32_t mapping_flags = 0;
uint64_t pte_flags;
bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
- bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
+ bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
+ bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
unsigned int mtype_local;
if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_node = prange->svm_bo->node;
- switch (node->adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_node == node) {
@@ -1239,8 +1250,11 @@ svm_range_get_pte_flags(struct kfd_node *node,
}
break;
case IP_VERSION(9, 4, 3):
- mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
- (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW);
+ if (ext_coherent)
+ mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
+ else
+ mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
+ amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
snoop = true;
if (uncached) {
mapping_flags |= AMDGPU_VM_MTYPE_UC;
@@ -1249,10 +1263,12 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (bo_node->adev == node->adev &&
(!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
mapping_flags |= mtype_local;
- /* local HBM region far from partition or remote XGMI GPU */
- else if (svm_nodes_in_same_hive(bo_node, node))
+ /* local HBM region far from partition or remote XGMI GPU
+ * with regular system scope coherence
+ */
+ else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
mapping_flags |= AMDGPU_VM_MTYPE_NC;
- /* PCIe P2P */
+ /* PCIe P2P or extended system scope coherence */
else
mapping_flags |= AMDGPU_VM_MTYPE_UC;
/* system memory accessed by the APU */
@@ -1263,7 +1279,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (num_possible_nodes() <= 1)
mapping_flags |= mtype_local;
else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
mapping_flags |= AMDGPU_VM_MTYPE_UC;
@@ -1298,7 +1314,7 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_debug("[0x%llx 0x%llx]\n", start, last);
- return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
+ return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
last, init_pte_value, 0, 0, NULL, NULL,
fence);
}
@@ -1405,8 +1421,8 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
* different memory partition based on fpfn/lpfn, we should use
* same vm_manager.vram_base_offset regardless memory partition.
*/
- r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
- last_start, prange->start + i,
+ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
+ NULL, last_start, prange->start + i,
pte_flags,
(last_start - prange->start) << PAGE_SHIFT,
bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
@@ -2051,6 +2067,7 @@ svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
* @update_list: output, the ranges need validate and update GPU mapping
* @insert_list: output, the ranges need insert to svms
* @remove_list: output, the ranges are replaced and need remove from svms
+ * @remap_list: output, remap unaligned svm ranges
*
* Check if the virtual address range has overlap with any existing ranges,
* split partly overlapping ranges and add new ranges in the gaps. All changes
@@ -2074,7 +2091,7 @@ static int
svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
struct list_head *update_list, struct list_head *insert_list,
- struct list_head *remove_list)
+ struct list_head *remove_list, struct list_head *remap_list)
{
unsigned long last = start + size - 1UL;
struct svm_range_list *svms = &p->svms;
@@ -2090,6 +2107,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
INIT_LIST_HEAD(&new_list);
+ INIT_LIST_HEAD(remap_list);
node = interval_tree_iter_first(&svms->objects, start, last);
while (node) {
@@ -2126,14 +2144,14 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
if (node->start < start) {
pr_debug("change old range start\n");
r = svm_range_split_head(prange, start,
- insert_list);
+ insert_list, remap_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
r = svm_range_split_tail(prange, last,
- insert_list);
+ insert_list, remap_list);
if (r)
goto out;
}
@@ -2348,8 +2366,10 @@ retry:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
- /* Pairs with mmget in svm_range_add_list_work */
- mmput(mm);
+ /* Pairs with mmget in svm_range_add_list_work. If dropping the
+ * last mm refcount, schedule release work to avoid circular locking
+ */
+ mmput_async(mm);
spin_lock(&svms->deferred_list_lock);
}
@@ -2660,6 +2680,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
{
struct vm_area_struct *vma;
struct interval_tree_node *node;
+ struct rb_node *rb_node;
unsigned long start_limit, end_limit;
vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
@@ -2679,16 +2700,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
if (node) {
end_limit = min(end_limit, node->start);
/* Last range that ends before the fault address */
- node = container_of(rb_prev(&node->rb),
- struct interval_tree_node, rb);
+ rb_node = rb_prev(&node->rb);
} else {
/* Last range must end before addr because
* there was no range after addr
*/
- node = container_of(rb_last(&p->svms.objects.rb_root),
- struct interval_tree_node, rb);
+ rb_node = rb_last(&p->svms.objects.rb_root);
}
- if (node) {
+ if (rb_node) {
+ node = container_of(rb_node, struct interval_tree_node, rb);
if (node->last >= addr) {
WARN(1, "Overlap with prev node and page fault addr\n");
return -EFAULT;
@@ -3424,13 +3444,14 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
{
- if (!fence)
- return -EINVAL;
-
- if (dma_fence_is_signaled(&fence->base))
- return 0;
-
- if (fence->svm_bo) {
+ /* Dereferencing fence->svm_bo is safe here because the fence hasn't
+ * signaled yet and we're under the protection of the fence->lock.
+ * After the fence is signaled in svm_range_bo_release, we cannot get
+ * here any more.
+ *
+ * Reference is dropped in svm_range_evict_svm_bo_worker.
+ */
+ if (svm_bo_ref_unless_zero(fence->svm_bo)) {
WRITE_ONCE(fence->svm_bo->evicting, 1);
schedule_work(&fence->svm_bo->eviction_work);
}
@@ -3445,8 +3466,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
int r = 0;
svm_bo = container_of(work, struct svm_range_bo, eviction_work);
- if (!svm_bo_ref_unless_zero(svm_bo))
- return; /* svm_bo was freed while eviction was pending */
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
mm = svm_bo->eviction_fence->mm;
@@ -3509,6 +3528,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
struct list_head update_list;
struct list_head insert_list;
struct list_head remove_list;
+ struct list_head remap_list;
struct svm_range_list *svms;
struct svm_range *prange;
struct svm_range *next;
@@ -3540,7 +3560,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
/* Add new range and split existing ranges as needed */
r = svm_range_add(p, start, size, nattr, attrs, &update_list,
- &insert_list, &remove_list);
+ &insert_list, &remove_list, &remap_list);
if (r) {
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
@@ -3605,6 +3625,19 @@ out_unlock_range:
ret = r;
}
+ list_for_each_entry(prange, &remap_list, update_list) {
+ pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
+ prange, prange->start, prange->last);
+ mutex_lock(&prange->migrate_mutex);
+ r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
+ true, true, prange->mapped_to_gpu);
+ if (r)
+ pr_debug("failed %d on remap svm range\n", r);
+ mutex_unlock(&prange->migrate_mutex);
+ if (r)
+ ret = r;
+ }
+
dynamic_svm_range_dump(svms);
mutex_unlock(&svms->lock);
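
Note: the new remap_list handles splits that do not fall on a range's
migration-granularity boundary: the unaligned head/tail pieces are
re-validated and remapped after the main update loop so the GPU mapping is
rebuilt for the partially covered pieces. The alignment test, as used by
svm_range_split_head()/_tail() above:

	/* granularity is log2(pages) of the migration unit; an unaligned
	 * split boundary leaves the new piece needing a fresh mapping
	 */
	if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
		list_add(&head->update_list, remap_list);
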
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 25f7119057..c528df1d0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -179,9 +179,9 @@ void svm_range_add_list_work(struct svm_range_list *svms,
struct svm_range *prange, struct mm_struct *mm,
enum svm_work_list_ops op);
void schedule_deferred_list_work(struct svm_range_list *svms);
-void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
+void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
-void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma);
+void svm_range_dma_unmap(struct svm_range *prange);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 6e75e8fa18..e5f7c92eeb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1452,17 +1452,19 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
/* CPU->CPU link*/
cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
if (cpu_dev) {
- list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
- if (iolink3->node_to == iolink2->node_to)
- break;
-
- props->weight += iolink3->weight;
- props->min_latency += iolink3->min_latency;
- props->max_latency += iolink3->max_latency;
- props->min_bandwidth = min(props->min_bandwidth,
- iolink3->min_bandwidth);
- props->max_bandwidth = min(props->max_bandwidth,
- iolink3->max_bandwidth);
+ list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
+ if (iolink3->node_to != iolink2->node_to)
+ continue;
+
+ props->weight += iolink3->weight;
+ props->min_latency += iolink3->min_latency;
+ props->max_latency += iolink3->max_latency;
+ props->min_bandwidth = min(props->min_bandwidth,
+ iolink3->min_bandwidth);
+ props->max_bandwidth = min(props->max_bandwidth,
+ iolink3->max_bandwidth);
+ break;
+ }
} else {
WARN(1, "CPU node not found");
}
@@ -1536,7 +1538,6 @@ out:
/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
- struct kfd_cu_info *cu_info,
int cu_bitmask,
int cache_type, unsigned int cu_processor_id,
int cu_block)
@@ -1598,18 +1599,22 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
- struct kfd_cu_info *cu_info,
+ struct amdgpu_cu_info *cu_info,
+ struct amdgpu_gfx_config *gfx_info,
int cache_type, unsigned int cu_processor_id,
struct kfd_node *knode)
{
unsigned int cu_sibling_map_mask;
int first_active_cu;
int i, j, k, xcc, start, end;
+ int num_xcc = NUM_XCC(knode->xcc_mask);
struct kfd_cache_properties *pcache = NULL;
+ enum amdgpu_memory_partition mode;
+ struct amdgpu_device *adev = knode->adev;
start = ffs(knode->xcc_mask) - 1;
- end = start + NUM_XCC(knode->xcc_mask);
- cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0];
+ end = start + num_xcc;
+ cu_sibling_map_mask = cu_info->bitmap[start][0][0];
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
@@ -1627,7 +1632,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
pcache->processor_id_low = cu_processor_id
+ (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level;
- pcache->cache_size = pcache_info[cache_type].cache_size;
+
+ if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3))
+ mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ else
+ mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (pcache->cache_level == 2)
+ pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
+ else if (mode)
+ pcache->cache_size = pcache_info[cache_type].cache_size / mode;
+ else
+ pcache->cache_size = pcache_info[cache_type].cache_size;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
@@ -1645,15 +1661,15 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
k = 0;
for (xcc = start; xcc < end; xcc++) {
- for (i = 0; i < cu_info->num_shader_engines; i++) {
- for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
+ for (i = 0; i < gfx_info->max_shader_engines; i++) {
+ for (j = 0; j < gfx_info->max_sh_per_se; j++) {
pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
k += 4;
- cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4];
+ cu_sibling_map_mask = cu_info->bitmap[xcc][i % 4][j + i / 4];
cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
}
}
@@ -1678,16 +1694,14 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
unsigned int cu_processor_id;
int ret;
unsigned int num_cu_shared;
- struct kfd_cu_info cu_info;
- struct kfd_cu_info *pcu_info;
+ struct amdgpu_cu_info *cu_info = &kdev->adev->gfx.cu_info;
+ struct amdgpu_gfx_config *gfx_info = &kdev->adev->gfx.config;
int gpu_processor_id;
struct kfd_cache_properties *props_ext;
int num_of_entries = 0;
int num_of_cache_types = 0;
struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
- amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
- pcu_info = &cu_info;
gpu_processor_id = dev->node_props.simd_id_base;
@@ -1714,12 +1728,12 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
cu_processor_id = gpu_processor_id;
if (pcache_info[ct].cache_level == 1) {
for (xcc = start; xcc < end; xcc++) {
- for (i = 0; i < pcu_info->num_shader_engines; i++) {
- for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
- for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
+ for (i = 0; i < gfx_info->max_shader_engines; i++) {
+ for (j = 0; j < gfx_info->max_sh_per_se; j++) {
+ for (k = 0; k < gfx_info->max_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
- ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
- pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct,
+ ret = fill_in_l1_pcache(&props_ext, pcache_info,
+ cu_info->bitmap[xcc][i % 4][j + i / 4], ct,
cu_processor_id, k);
if (ret < 0)
@@ -1732,9 +1746,9 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
/* Move to next CU block */
num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
- pcu_info->num_cu_per_sh) ?
+ gfx_info->max_cu_per_sh) ?
pcache_info[ct].num_cu_shared :
- (pcu_info->num_cu_per_sh - k);
+ (gfx_info->max_cu_per_sh - k);
cu_processor_id += num_cu_shared;
}
}
@@ -1742,7 +1756,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
}
} else {
ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
- pcu_info, ct, cu_processor_id, kdev);
+ cu_info, gfx_info, ct, cu_processor_id, kdev);
if (ret < 0)
break;
@@ -1921,10 +1935,11 @@ int kfd_topology_add_device(struct kfd_node *gpu)
{
uint32_t gpu_id;
struct kfd_topology_device *dev;
- struct kfd_cu_info cu_info;
int res = 0;
int i;
const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
+ struct amdgpu_gfx_config *gfx_info = &gpu->adev->gfx.config;
+ struct amdgpu_cu_info *cu_info = &gpu->adev->gfx.cu_info;
gpu_id = kfd_generate_gpu_id(gpu);
if (gpu->xcp && !gpu->xcp->ddev) {
@@ -1962,9 +1977,6 @@ int kfd_topology_add_device(struct kfd_node *gpu)
/* Fill-in additional information that is not available in CRAT but
* needed for the topology
*/
-
- amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info);
-
for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1; i++) {
dev->node_props.name[i] = __tolower(asic_name[i]);
if (asic_name[i] == '\0')
@@ -1973,7 +1985,7 @@ int kfd_topology_add_device(struct kfd_node *gpu)
dev->node_props.name[i] = '\0';
dev->node_props.simd_arrays_per_engine =
- cu_info.num_shader_arrays_per_engine;
+ gfx_info->max_sh_per_se;
dev->node_props.gfx_target_version =
gpu->kfd->device_info.gfx_target_version;
@@ -2054,7 +2066,7 @@ int kfd_topology_add_device(struct kfd_node *gpu)
*/
if (dev->gpu->adev->asic_type == CHIP_CARRIZO) {
dev->node_props.simd_count =
- cu_info.simd_per_cu * cu_info.cu_active_number;
+ cu_info->simd_per_cu * cu_info->number;
dev->node_props.max_waves_per_simd = 10;
}
@@ -2221,7 +2233,7 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
if (first_cpu_of_numa_node >= nr_cpu_ids)
return -1;
#ifdef CONFIG_X86_64
- return cpu_data(first_cpu_of_numa_node).apicid;
+ return cpu_data(first_cpu_of_numa_node).topo.apicid;
#else
return first_cpu_of_numa_node;
#endif