Diffstat (limited to 'drivers/gpu/drm/amd')
396 files changed, 10773 insertions, 3767 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 2afecc5509..260e32ef7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -80,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
 	amdgpu_fw_attestation.o amdgpu_securedisplay.o \
 	amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
-	amdgpu_ring_mux.o amdgpu_xcp.o
+	amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 13c62a26aa..79827a6dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -109,6 +109,8 @@
 #include "amdgpu_mca.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_xcp.h"
+#include "amdgpu_seq64.h"
+#include "amdgpu_reg_state.h"
 
 #define MAX_GPU_INSTANCE	64
 
@@ -251,6 +253,8 @@ extern int amdgpu_seamless;
 extern int amdgpu_user_partt_mode;
 extern int amdgpu_agp;
 
+extern int amdgpu_wbrf;
+
 #define AMDGPU_VM_MAX_NUM_CTX		4096
 #define AMDGPU_SG_THRESHOLD		(256*1024*1024)
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
@@ -469,6 +473,7 @@ struct amdgpu_fpriv {
 	struct amdgpu_vm	vm;
 	struct amdgpu_bo_va	*prt_va;
 	struct amdgpu_bo_va	*csa_va;
+	struct amdgpu_bo_va	*seq64_va;
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
 	struct amdgpu_ctx_mgr	ctx_mgr;
@@ -507,6 +512,31 @@ struct amdgpu_allowed_register_entry {
 	bool grbm_indexed;
 };
 
+/**
+ * enum amd_reset_method - Methods for resetting AMD GPU devices
+ *
+ * @AMD_RESET_METHOD_NONE: The device will not be reset.
+ * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs.
+ * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for any
+ *                   device.
+ * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.)
+ *                   individually. Suitable only for some discrete GPUs, not
+ *                   available for all ASICs.
+ * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs
+ *                   are reset depends on the ASIC. Notably doesn't reset IPs
+ *                   shared with the CPU on APUs or the memory controllers (so
+ *                   VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
+ *                  but without powering off the PCI bus. Suitable only for
+ *                  discrete GPUs.
+ * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset
+ *                 and does a secondary bus reset or FLR, depending on what the
+ *                 underlying hardware supports.
+ *
+ * Methods available for AMD GPU driver for resetting the device. Not all
+ * methods are suitable for every device. User can override the method using
+ * module parameter `reset_method`.
+ */
 enum amd_reset_method {
 	AMD_RESET_METHOD_NONE = -1,
 	AMD_RESET_METHOD_LEGACY = 0,
@@ -586,6 +616,10 @@ struct amdgpu_asic_funcs {
 			      const struct amdgpu_video_codecs **codecs);
 	/* encode "> 32bits" smn addressing */
 	u64 (*encode_ext_smn_addressing)(int ext_id);
+
+	ssize_t (*get_reg_state)(struct amdgpu_device *adev,
+				 enum amdgpu_reg_state reg_state, void *buf,
+				 size_t max_size);
 };
 
 /*
@@ -988,6 +1022,9 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds	gds;
 
+	/* for userq and VM fences */
+	struct amdgpu_seq64	seq64;
+
 	/* KFD */
 	struct amdgpu_kfd_dev	kfd;
 
@@ -1110,6 +1147,7 @@ struct amdgpu_device {
 	bool	debug_vm;
 	bool	debug_largebar;
 	bool	debug_disable_soft_recovery;
+	bool	debug_use_vram_fw_buf;
 };
 
 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3d126f2967..131983ed43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -138,6 +138,30 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
 	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }
 
+static const struct drm_client_funcs kfd_client_funcs = {
+	.unregister = drm_client_release,
+};
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
+{
+	int ret;
+
+	if (!adev->kfd.init_complete || adev->kfd.client.dev)
+		return 0;
+
+	ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
+			      &kfd_client_funcs);
+	if (ret) {
+		dev_err(adev->dev, "Failed to init DRM client: %d\n",
+			ret);
+		return ret;
+	}
+
+	drm_client_register(&adev->kfd.client);
+
+	return 0;
+}
+
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
 	int i;
@@ -708,35 +732,6 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 	return false;
 }
 
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
-				     uint16_t vmid)
-{
-	if (adev->family == AMDGPU_FAMILY_AI) {
-		int i;
-
-		for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
-			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
-	} else {
-		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0);
-	}
-
-	return 0;
-}
-
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
-				      uint16_t pasid,
-				      enum TLB_FLUSH_TYPE flush_type,
-				      uint32_t inst)
-{
-	bool all_hub = false;
-
-	if (adev->family == AMDGPU_FAMILY_AI ||
-	    adev->family == AMDGPU_FAMILY_RV)
-		all_hub = true;
-
-	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst);
-}
-
 bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
 {
 	return adev->have_atomics_support;
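The new KFD DRM client above gives KFD its own struct drm_file (adev->kfd.client.file) to allocate GEM handles against. A minimal sketch of a BO export through that client, mirroring the kfd_mem_export_dmabuf() flow that appears later in this diff (the helper name is hypothetical):

#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>

/* Illustrative only: export a KFD BO identified by its GEM handle. */
static struct dma_buf *kfd_bo_to_dmabuf(struct amdgpu_device *adev,
					uint32_t gem_handle, bool writable)
{
	struct dma_buf *dmabuf;
	int fd, r;

	r = drm_gem_prime_handle_to_fd(&adev->ddev, adev->kfd.client.file,
				       gem_handle, writable ? DRM_RDWR : 0,
				       &fd);
	if (r)
		return ERR_PTR(r);

	dmabuf = dma_buf_get(fd);	/* takes its own reference */
	close_fd(fd);			/* drop the temporary fd */
	return dmabuf;
}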
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index dac983da96..27c61c535e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -33,6 +33,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/memremap.h>
 #include <kgd_kfd_interface.h>
+#include <drm/drm_client.h>
 #include "amdgpu_sync.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_xcp.h"
@@ -83,6 +84,7 @@ struct kgd_mem {
 
 	struct amdgpu_sync sync;
 
+	uint32_t gem_handle;
 	bool aql_queue;
 	bool is_imported;
 };
@@ -105,6 +107,9 @@ struct amdgpu_kfd_dev {
 
 	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
 	struct dev_pagemap pgmap;
+
+	/* Client for KFD BO GEM handle allocations */
+	struct drm_client_dev client;
 };
 
 enum kgd_engine_type {
@@ -162,11 +167,6 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
 			uint32_t *ib_cmd, uint32_t ib_len);
 void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
 bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
-			uint16_t vmid);
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
-			uint16_t pasid, enum TLB_FLUSH_TYPE flush_type,
-			uint32_t inst);
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
@@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
 struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
 				struct mm_struct *mm,
 				struct svm_range_bo *svm_bo);
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
 #if defined(CONFIG_DEBUG_FS)
 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
 #endif
@@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
 					  struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
@@ -311,14 +313,13 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
-					    struct dma_fence **ef);
+					    struct dma_fence __rcu **ef);
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
 					  struct kfd_vm_fault_info *info);
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
-				      struct dma_buf *dmabuf,
-				      uint64_t va, void *drm_priv,
-				      struct kgd_mem **mem, uint64_t *size,
-				      uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+					 uint64_t va, void *drm_priv,
+					 struct kgd_mem **mem, uint64_t *size,
+					 uint64_t *mmap_offset);
 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
 				      struct dma_buf **dmabuf);
 void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 625db444df..3a3f3ce09f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -200,7 +200,7 @@ int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+6+7+10)
 
-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
 
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool suspend)
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
-		if (!(ring && ring->sched.thread))
+		if (!amdgpu_ring_sched_ready(ring))
 			continue;
 
 		/* stop scheduler and drain ring. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index f6598b9e4f..a5c7259cf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -141,7 +141,7 @@ static int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev,
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)
 
-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 6bf448ab3d..ca4a6b8281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -214,7 +214,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)
 
-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
 
@@ -301,7 +301,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+4)
 
-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index cd06e4a6d1..0f3e2944ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -238,7 +238,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)
 
-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
 
@@ -324,7 +324,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+4+2+3+7)
 
-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 00fbc0f44c..5a35a8ca89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -363,7 +363,7 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)
 
-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
 
@@ -460,7 +460,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+6+7+10)
 
-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
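All six kmalloc_array() hunks above make the same change: *dump points to an array of {register, value} pairs of type uint32_t (*)[2], so sizing by sizeof(**dump) replaces the hand-computed HQD_N_REGS * 2 * sizeof(uint32_t) and stays correct if the element type ever changes. A minimal sketch of the pattern (hypothetical helper name):

#include <linux/slab.h>
#include <linux/types.h>

/* Allocate n_regs {address, value} pairs for an HQD register dump. */
static int alloc_hqd_dump(uint32_t (**dump)[2], unsigned int n_regs)
{
	/* sizeof(**dump) == 2 * sizeof(uint32_t), one pair per register */
	*dump = kmalloc_array(n_regs, sizeof(**dump), GFP_KERNEL);
	return *dump ? 0 : -ENOMEM;
}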
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 41fbc4fd0f..daa66eb4f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/task.h>
+#include <linux/fdtable.h>
 #include <drm/ttm/ttm_tt.h>
 
 #include <drm/drm_exec.h>
@@ -806,13 +807,22 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
 static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
 {
 	if (!mem->dmabuf) {
-		struct dma_buf *ret = amdgpu_gem_prime_export(
-			&mem->bo->tbo.base,
+		struct amdgpu_device *bo_adev;
+		struct dma_buf *dmabuf;
+		int r, fd;
+
+		bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+		r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
+					       mem->gem_handle,
 			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
-				DRM_RDWR : 0);
-		if (IS_ERR(ret))
-			return PTR_ERR(ret);
-		mem->dmabuf = ret;
+					       DRM_RDWR : 0, &fd);
+		if (r)
+			return r;
+		dmabuf = dma_buf_get(fd);
+		close_fd(fd);
+		if (WARN_ON_ONCE(IS_ERR(dmabuf)))
+			return PTR_ERR(dmabuf);
+		mem->dmabuf = dmabuf;
 	}
 
 	return 0;
@@ -1137,7 +1147,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 	ctx->n_vms = 1;
 	ctx->sync = &mem->sync;
 
-	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&ctx->exec) {
 		ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
 		drm_exec_retry_on_contention(&ctx->exec);
@@ -1176,7 +1186,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	int ret;
 
 	ctx->sync = &mem->sync;
-	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&ctx->exec) {
 		ctx->n_vms = 0;
 		list_for_each_entry(entry, &mem->attachments, list) {
@@ -1384,7 +1394,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 				  amdgpu_amdkfd_restore_userptr_worker);
 
 		*process_info = info;
-		*ef = dma_fence_get(&info->eviction_fence->base);
 	}
 
 	vm->process_info = *process_info;
@@ -1415,6 +1424,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 	list_add_tail(&vm->vm_list_node,
 			&(vm->process_info->vm_list_head));
 	vm->process_info->n_vms++;
+
+	*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
 	mutex_unlock(&vm->process_info->lock);
 
 	return 0;
@@ -1426,10 +1437,7 @@ validate_pd_fail:
 reserve_pd_fail:
 	vm->process_info = NULL;
 	if (info) {
-		/* Two fence references: one in info and one in *ef */
 		dma_fence_put(&info->eviction_fence->base);
-		dma_fence_put(*ef);
-		*ef = NULL;
 		*process_info = NULL;
 		put_pid(info->pid);
 create_evict_fence_fail:
@@ -1623,7 +1631,8 @@ int amdgpu_amdkfd_criu_resume(void *p)
 		goto out_unlock;
 	}
 	WRITE_ONCE(pinfo->block_mmu_notifications, false);
-	schedule_delayed_work(&pinfo->restore_userptr_work, 0);
+	queue_delayed_work(system_freezable_wq,
+			   &pinfo->restore_userptr_work, 0);
 
 out_unlock:
 	mutex_unlock(&pinfo->lock);
@@ -1779,6 +1788,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		pr_debug("Failed to allow vma node access. ret %d\n", ret);
 		goto err_node_allow;
 	}
+	ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
+	if (ret)
+		goto err_gem_handle_create;
 	bo = gem_to_amdgpu_bo(gobj);
 	if (bo_type == ttm_bo_type_sg) {
 		bo->tbo.sg = sg;
@@ -1830,6 +1842,8 @@ allocate_init_user_pages_failed:
 err_pin_bo:
 err_validate_bo:
 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+	drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
+err_gem_handle_create:
 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
 	/* Don't unreserve system mem limit twice */
@@ -1837,6 +1851,7 @@ err_node_allow:
 err_bo_create:
 	amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
 err_reserve_limit:
+	amdgpu_sync_free(&(*mem)->sync);
 	mutex_destroy(&(*mem)->lock);
 	if (gobj)
 		drm_gem_object_put(gobj);
@@ -1942,8 +1957,11 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 
 	/* Free the BO*/
 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
-	if (mem->dmabuf)
+	drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
+	if (mem->dmabuf) {
 		dma_buf_put(mem->dmabuf);
+		mem->dmabuf = NULL;
+	}
 	mutex_destroy(&mem->lock);
 
 	/* If this releases the last reference, it will end up calling
@@ -2068,21 +2086,35 @@ out:
 	return ret;
 }
 
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
 {
 	struct kfd_mem_attachment *entry;
 	struct amdgpu_vm *vm;
+	int ret;
 
 	vm = drm_priv_to_vm(drm_priv);
 
 	mutex_lock(&mem->lock);
 
+	ret = amdgpu_bo_reserve(mem->bo, true);
+	if (ret)
+		goto out;
+
 	list_for_each_entry(entry, &mem->attachments, list) {
-		if (entry->bo_va->base.vm == vm)
-			kfd_mem_dmaunmap_attachment(mem, entry);
+		if (entry->bo_va->base.vm != vm)
+			continue;
+		if (entry->bo_va->base.bo->tbo.ttm &&
+		    !entry->bo_va->base.bo->tbo.ttm->sg)
+			continue;
+
+		kfd_mem_dmaunmap_attachment(mem, entry);
 	}
 
+	amdgpu_bo_unreserve(mem->bo);
+out:
 	mutex_unlock(&mem->lock);
+
+	return ret;
 }
 
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
@@ -2295,34 +2327,26 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
 	return 0;
 }
 
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
-				      struct dma_buf *dma_buf,
-				      uint64_t va, void *drm_priv,
-				      struct kgd_mem **mem, uint64_t *size,
-				      uint64_t *mmap_offset)
+static int import_obj_create(struct amdgpu_device *adev,
+			     struct dma_buf *dma_buf,
+			     struct drm_gem_object *obj,
+			     uint64_t va, void *drm_priv,
+			     struct kgd_mem **mem, uint64_t *size,
+			     uint64_t *mmap_offset)
 {
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
-	struct drm_gem_object *obj;
 	struct amdgpu_bo *bo;
 	int ret;
 
-	obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
 	bo = gem_to_amdgpu_bo(obj);
 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
-				    AMDGPU_GEM_DOMAIN_GTT))) {
+				    AMDGPU_GEM_DOMAIN_GTT)))
 		/* Only VRAM and GTT BOs are supported */
-		ret = -EINVAL;
-		goto err_put_obj;
-	}
+		return -EINVAL;
 
 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-	if (!*mem) {
-		ret = -ENOMEM;
-		goto err_put_obj;
-	}
+	if (!*mem)
+		return -ENOMEM;
 
 	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
 	if (ret)
@@ -2372,8 +2396,41 @@ err_remove_mem:
 	drm_vma_node_revoke(&obj->vma_node, drm_priv);
 err_free_mem:
 	kfree(*mem);
+	return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+					 uint64_t va, void *drm_priv,
+					 struct kgd_mem **mem, uint64_t *size,
+					 uint64_t *mmap_offset)
+{
+	struct drm_gem_object *obj;
+	uint32_t handle;
+	int ret;
+
+	ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
+					 &handle);
+	if (ret)
+		return ret;
+	obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
+	if (!obj) {
+		ret = -EINVAL;
+		goto err_release_handle;
+	}
+
+	ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
+				mmap_offset);
+	if (ret)
+		goto err_put_obj;
+
+	(*mem)->gem_handle = handle;
+
+	return 0;
+
 err_put_obj:
 	drm_gem_object_put(obj);
+err_release_handle:
+	drm_gem_handle_delete(adev->kfd.client.file, handle);
 	return ret;
 }
 
@@ -2426,7 +2483,8 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
 					   KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
 		if (r)
 			pr_err("Failed to quiesce KFD\n");
-		schedule_delayed_work(&process_info->restore_userptr_work,
+		queue_delayed_work(system_freezable_wq,
+			&process_info->restore_userptr_work,
 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 	}
 	mutex_unlock(&process_info->notifier_lock);
@@ -2552,7 +2610,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 
 	amdgpu_sync_create(&sync);
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	/* Reserve all BOs and page tables for validation */
 	drm_exec_until_all_locked(&exec) {
 		/* Reserve all the page directories */
@@ -2749,7 +2807,8 @@ unlock_out:
 
 	/* If validation failed, reschedule another attempt */
 	if (evicted_bos) {
-		schedule_delayed_work(&process_info->restore_userptr_work,
+		queue_delayed_work(system_freezable_wq,
+			&process_info->restore_userptr_work,
 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 
 		kfd_smi_event_queue_restore_rescheduled(mm);
@@ -2758,6 +2817,23 @@ unlock_out:
 	put_task_struct(usertask);
 }
 
+static void replace_eviction_fence(struct dma_fence __rcu **ef,
+				   struct dma_fence *new_ef)
+{
+	struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
+		/* protected by process_info->lock */);
+
+	/* If we're replacing an unsignaled eviction fence, that fence will
+	 * never be signaled, and if anyone is still waiting on that fence,
+	 * they will hang forever. This should never happen. We should only
+	 * replace the fence in restore_work that only gets scheduled after
+	 * eviction work signaled the fence.
+	 */
+	WARN_ONCE(!dma_fence_is_signaled(old_ef),
+		  "Replacing unsignaled eviction fence");
+	dma_fence_put(old_ef);
+}
+
 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
  *   KFD process identified by process_info
  *
@@ -2776,12 +2852,11 @@ unlock_out:
  * 7.  Add fence to all PD and PT BOs.
  * 8.  Unreserve all BOs
  */
-int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
 {
 	struct amdkfd_process_info *process_info = info;
 	struct amdgpu_vm *peer_vm;
 	struct kgd_mem *mem;
-	struct amdgpu_amdkfd_fence *new_fence;
 	struct list_head duplicate_save;
 	struct amdgpu_sync sync_obj;
 	unsigned long failed_size = 0;
@@ -2793,7 +2868,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
 
 	mutex_lock(&process_info->lock);
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		list_for_each_entry(peer_vm, &process_info->vm_list_head,
 				    vm_list_node) {
@@ -2825,12 +2900,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
 	if (ret)
 		goto validate_map_fail;
 
-	ret = process_sync_pds_resv(process_info, &sync_obj);
-	if (ret) {
-		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
-		goto validate_map_fail;
-	}
-
 	/* Validate BOs and map them to GPUVM (update VM page tables). */
 	list_for_each_entry(mem, &process_info->kfd_bo_list,
 			    validate_list) {
@@ -2881,6 +2950,19 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
 	if (failed_size)
 		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
 
+	/* Update mappings not managed by KFD */
+	list_for_each_entry(peer_vm, &process_info->vm_list_head,
+			vm_list_node) {
+		struct amdgpu_device *adev = amdgpu_ttm_adev(
+			peer_vm->root.bo->tbo.bdev);
+
+		ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
+		if (ret) {
+			pr_debug("Memory eviction: handle moved failed. Try again\n");
+			goto validate_map_fail;
+		}
+	}
+
 	/* Update page directories */
 	ret = process_update_pds(process_info, &sync_obj);
 	if (ret) {
@@ -2888,25 +2970,47 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
 		goto validate_map_fail;
 	}
 
+	/* Sync with fences on all the page tables. They implicitly depend on any
+	 * move fences from amdgpu_vm_handle_moved above.
+	 */
+	ret = process_sync_pds_resv(process_info, &sync_obj);
+	if (ret) {
+		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+		goto validate_map_fail;
+	}
+
 	/* Wait for validate and PT updates to finish */
 	amdgpu_sync_wait(&sync_obj, false);
 
-	/* Release old eviction fence and create new one, because fence only
-	 * goes from unsignaled to signaled, fence cannot be reused.
-	 * Use context and mm from the old fence.
+	/* The old eviction fence may be unsignaled if restore happens
+	 * after a GPU reset or suspend/resume. Keep the old fence in that
+	 * case. Otherwise release the old eviction fence and create new
+	 * one, because fence only goes from unsignaled to signaled once
+	 * and cannot be reused. Use context and mm from the old fence.
+	 *
+	 * If an old eviction fence signals after this check, that's OK.
+	 * Anyone signaling an eviction fence must stop the queues first
+	 * and schedule another restore worker.
 	 */
-	new_fence = amdgpu_amdkfd_fence_create(
+	if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
+		struct amdgpu_amdkfd_fence *new_fence =
+			amdgpu_amdkfd_fence_create(
 				process_info->eviction_fence->base.context,
 				process_info->eviction_fence->mm,
 				NULL);
-	if (!new_fence) {
-		pr_err("Failed to create eviction fence\n");
-		ret = -ENOMEM;
-		goto validate_map_fail;
+
+		if (!new_fence) {
+			pr_err("Failed to create eviction fence\n");
+			ret = -ENOMEM;
+			goto validate_map_fail;
+		}
+		dma_fence_put(&process_info->eviction_fence->base);
+		process_info->eviction_fence = new_fence;
+		replace_eviction_fence(ef, dma_fence_get(&new_fence->base));
+	} else {
+		WARN_ONCE(*ef != &process_info->eviction_fence->base,
+			  "KFD eviction fence doesn't match KGD process_info");
 	}
-	dma_fence_put(&process_info->eviction_fence->base);
-	process_info->eviction_fence = new_fence;
-	*ef = dma_fence_get(&new_fence->base);
 
 	/* Attach new eviction fence to all BOs except pinned ones */
 	list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
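The restore path now publishes the eviction fence through an RCU-protected pointer (struct dma_fence __rcu **ef) and swaps it with rcu_replace_pointer() under process_info->lock. A sketch of the matching reader side (the helper name is hypothetical); dma_fence_get_rcu_safe() is the standard dma-fence API for taking a reference that is safe against a concurrent replacement:

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

/* Illustrative reader for an __rcu eviction-fence pointer. */
static struct dma_fence *get_eviction_fence(struct dma_fence __rcu **ef)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(ef);	/* ref only if still current */
	rcu_read_unlock();

	return fence;
}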
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 7473a42f7d..9caba10315 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -103,7 +103,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 	struct amdgpu_connector_atom_dig *dig_connector;
 	int bpc = 8;
-	unsigned mode_clock, max_tmds_clock;
+	unsigned int mode_clock, max_tmds_clock;
 
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
@@ -255,6 +255,7 @@ struct edid *amdgpu_connector_edid(struct drm_connector *connector)
 		return amdgpu_connector->edid;
 	} else if (edid_blob) {
 		struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+
 		if (edid)
 			amdgpu_connector->edid = edid;
 	}
@@ -581,6 +582,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
 			amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
 		} else {
 			const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+
 			amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
 		}
 
@@ -797,6 +799,7 @@ static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
 		amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
 	else {
 		const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+
 		amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
 	}
 
@@ -979,6 +982,41 @@ amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
 	return false;
 }
 
+static void amdgpu_connector_shared_ddc(enum drm_connector_status *status,
+					struct drm_connector *connector,
+					struct amdgpu_connector *amdgpu_connector)
+{
+	struct drm_connector *list_connector;
+	struct drm_connector_list_iter iter;
+	struct amdgpu_connector *list_amdgpu_connector;
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+
+	if (amdgpu_connector->shared_ddc && *status == connector_status_connected) {
+		drm_connector_list_iter_begin(dev, &iter);
+		drm_for_each_connector_iter(list_connector,
+					    &iter) {
+			if (connector == list_connector)
+				continue;
+			list_amdgpu_connector = to_amdgpu_connector(list_connector);
+			if (list_amdgpu_connector->shared_ddc &&
+			    list_amdgpu_connector->ddc_bus->rec.i2c_id ==
+			    amdgpu_connector->ddc_bus->rec.i2c_id) {
+				/* cases where both connectors are digital */
+				if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+					/* hpd is our only option in this case */
+					if (!amdgpu_display_hpd_sense(adev,
+								      amdgpu_connector->hpd.hpd)) {
+						amdgpu_connector_free_edid(connector);
+						*status = connector_status_disconnected;
+					}
+				}
+			}
+		}
+		drm_connector_list_iter_end(&iter);
+	}
+}
+
 /*
  * DVI is complicated
  * Do a DDC probe, if DDC probe passes, get the full EDID so
@@ -1065,32 +1103,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 			 * DDC line.  The latter is more complex because with DVI<->HDMI adapters
 			 * you don't really know what's connected to which port as both are digital.
 			 */
-			if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
-				struct drm_connector *list_connector;
-				struct drm_connector_list_iter iter;
-				struct amdgpu_connector *list_amdgpu_connector;
-
-				drm_connector_list_iter_begin(dev, &iter);
-				drm_for_each_connector_iter(list_connector,
-							    &iter) {
-					if (connector == list_connector)
-						continue;
-					list_amdgpu_connector = to_amdgpu_connector(list_connector);
-					if (list_amdgpu_connector->shared_ddc &&
-					    (list_amdgpu_connector->ddc_bus->rec.i2c_id ==
-					     amdgpu_connector->ddc_bus->rec.i2c_id)) {
-						/* cases where both connectors are digital */
-						if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
-							/* hpd is our only option in this case */
-							if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-								amdgpu_connector_free_edid(connector);
-								ret = connector_status_disconnected;
-							}
-						}
-					}
-				}
-				drm_connector_list_iter_end(&iter);
-			}
+			amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
 		}
 	}
@@ -1192,6 +1205,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
 static void amdgpu_connector_dvi_force(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
 	if (connector->force == DRM_FORCE_ON)
 		amdgpu_connector->use_digital = false;
 	if (connector->force == DRM_FORCE_ON_DIGITAL)
@@ -1426,6 +1440,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
 			ret = connector_status_connected;
 		else if (amdgpu_connector->dac_load_detect) { /* try load detection */
 			const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
 			ret = encoder_funcs->detect(encoder, connector);
 		}
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e50be65000..116d3756c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -66,7 +66,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
 
 	amdgpu_sync_create(&p->sync);
 	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-		      DRM_EXEC_IGNORE_DUPLICATES);
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
 	return 0;
 }
 
@@ -819,7 +819,7 @@ retry:
 
 	p->bytes_moved += ctx.bytes_moved;
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    amdgpu_bo_in_cpu_visible_vram(bo))
+	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 		p->bytes_moved_vis += ctx.bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -870,9 +870,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		struct amdgpu_bo *bo = e->bo;
 		int i;
 
-		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
-					sizeof(struct page *),
-					GFP_KERNEL | __GFP_ZERO);
+		e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
+					 sizeof(struct page *),
+					 GFP_KERNEL);
 		if (!e->user_pages) {
 			DRM_ERROR("kvmalloc_array failure\n");
 			r = -ENOMEM;
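This and the other drm_exec_init() hunks throughout the diff track a drm_exec core API change: the initializer gained a third parameter, a hint for the expected number of objects to lock (0 preserves the old behaviour). Minimal usage sketch of the updated API (hypothetical helper):

#include <drm/drm_exec.h>

/* Lock a single GEM object with the three-argument drm_exec_init(). */
static int lock_one_bo(struct drm_gem_object *obj)
{
	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec, obj);
		drm_exec_retry_on_contention(&exec);
		if (r)
			break;
	}
	/* ... use the locked object ... */
	drm_exec_fini(&exec);
	return r;
}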
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 7200110197..796fa6f142 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -70,7 +70,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	int r;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 		if (likely(!r))
@@ -110,7 +110,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	int r;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 		if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e2ae9ba147..5cb33ac99f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
 		return DRM_SCHED_PRIORITY_NORMAL;
 
 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
-		return DRM_SCHED_PRIORITY_MIN;
+		return DRM_SCHED_PRIORITY_LOW;
 
 	case AMDGPU_CTX_PRIORITY_LOW:
-		return DRM_SCHED_PRIORITY_MIN;
+		return DRM_SCHED_PRIORITY_LOW;
 
 	case AMDGPU_CTX_PRIORITY_NORMAL:
 		return DRM_SCHED_PRIORITY_NORMAL;
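With the scheduler rework this series builds on, DRM_SCHED_PRIORITY_MIN is renamed to DRM_SCHED_PRIORITY_LOW, and both userspace "very low" and "low" contexts map to the scheduler's lowest run queue. Condensed sketch of the mapping the hunk above produces (hypothetical helper):

/* Sketch only: mirrors amdgpu_ctx_to_drm_sched_prio() after the rename. */
static enum drm_sched_priority ctx_prio_to_sched(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_LOW;
	default:
		return DRM_SCHED_PRIORITY_NORMAL;
	}
}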
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8d4a3ff65c..1afbb2e932 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -540,7 +540,11 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 	while (size) {
 		uint32_t value;
 
-		value = RREG32_PCIE(*pos);
+		if (upper_32_bits(*pos))
+			value = RREG32_PCIE_EXT(*pos);
+		else
+			value = RREG32_PCIE(*pos);
+
 		r = put_user(value, (uint32_t *)buf);
 		if (r)
 			goto out;
@@ -600,7 +604,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
 		if (r)
 			goto out;
 
-		WREG32_PCIE(*pos, value);
+		if (upper_32_bits(*pos))
+			WREG32_PCIE_EXT(*pos, value);
+		else
+			WREG32_PCIE(*pos, value);
 
 		result += 4;
 		buf += 4;
@@ -1671,9 +1678,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 
-		if (!ring || !ring->sched.thread)
+		if (!amdgpu_ring_sched_ready(ring))
 			continue;
-		kthread_park(ring->sched.thread);
+		drm_sched_wqueue_stop(&ring->sched);
 	}
 
 	seq_puts(m, "run ib test:\n");
@@ -1687,9 +1694,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 
-		if (!ring || !ring->sched.thread)
+		if (!amdgpu_ring_sched_ready(ring))
 			continue;
-		kthread_unpark(ring->sched.thread);
+		drm_sched_wqueue_start(&ring->sched);
 	}
 
 	up_write(&adev->reset_domain->sem);
@@ -1909,7 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 
 	ring = adev->rings[val];
 
-	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
+	if (!amdgpu_ring_sched_ready(ring) ||
+	    !ring->funcs->preempt_ib)
 		return -EINVAL;
 
 	/* the last preemption failed */
@@ -1927,7 +1935,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 		goto pro_end;
 
 	/* stop the scheduler */
-	kthread_park(ring->sched.thread);
+	drm_sched_wqueue_stop(&ring->sched);
 
 	/* preempt the IB */
 	r = amdgpu_ring_preempt_ib(ring);
@@ -1961,7 +1969,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 
 failure:
 	/* restart the scheduler */
-	kthread_unpark(ring->sched.thread);
+	drm_sched_wqueue_start(&ring->sched);
 
 	up_read(&adev->reset_domain->sem);
 
@@ -2146,6 +2154,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	amdgpu_debugfs_firmware_init(adev);
 	amdgpu_ta_if_debugfs_init(adev);
 
+	amdgpu_debugfs_mes_event_log_init(adev);
+
 #if defined(CONFIG_DRM_AMD_DC)
 	if (adev->dc_enabled)
 		dtn_debugfs_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 371a6f0deb..0425432d86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,3 +32,5 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev);
 void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+
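The debugfs hunks above follow the DRM scheduler's move from a dedicated kthread to a workqueue: kthread_park()/kthread_unpark() on ring->sched.thread become drm_sched_wqueue_stop()/drm_sched_wqueue_start(), and the liveness check is wrapped in the new amdgpu_ring_sched_ready() helper. Minimal sketch of the stop side (hypothetical helper, same APIs as the diff):

/* Pause all ring schedulers, e.g. around a debugfs-triggered test. */
static void pause_all_rings(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!amdgpu_ring_sched_ready(ring))
			continue;
		drm_sched_wqueue_stop(&ring->sched);
	}
}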
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7f48c7ec41..d0afb9ba37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -162,6 +162,65 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 static DEVICE_ATTR(pcie_replay_count, 0444,
 		amdgpu_device_get_pcie_replay_count, NULL);
 
+static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
+					  struct bin_attribute *attr, char *buf,
+					  loff_t ppos, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	ssize_t bytes_read;
+
+	switch (ppos) {
+	case AMDGPU_SYS_REG_STATE_XGMI:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
+		break;
+	case AMDGPU_SYS_REG_STATE_WAFL:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
+		break;
+	case AMDGPU_SYS_REG_STATE_PCIE:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
+		break;
+	case AMDGPU_SYS_REG_STATE_USR:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
+		break;
+	case AMDGPU_SYS_REG_STATE_USR_1:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return bytes_read;
+}
+
+BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
+	 AMDGPU_SYS_REG_STATE_END);
+
+int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
+{
+	int ret;
+
+	if (!amdgpu_asic_get_reg_state_supported(adev))
+		return 0;
+
+	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
+
+	return ret;
+}
+
+void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
+{
+	if (!amdgpu_asic_get_reg_state_supported(adev))
+		return;
+	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
+}
+
 /**
  * DOC: board_info
  *
@@ -1541,7 +1600,7 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
 	if (adev->mman.keep_stolen_vga_memory)
 		return false;
 
-	return adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0);
+	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
 }
 
 /*
@@ -1552,11 +1611,15 @@
  * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
-static bool amdgpu_device_pcie_dynamic_switching_supported(void)
+static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
 {
 #if IS_ENABLED(CONFIG_X86)
 	struct cpuinfo_x86 *c = &cpu_data(0);
 
+	/* eGPU change speeds based on USB4 fabric conditions */
+	if (dev_is_removable(adev->dev))
+		return true;
+
 	if (c->x86_vendor == X86_VENDOR_INTEL)
 		return false;
 #endif
@@ -2389,7 +2452,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
-	if (!amdgpu_device_pcie_dynamic_switching_supported())
+	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
 
 	total = true;
@@ -2567,7 +2630,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 			break;
 		}
 
-		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
 				   DRM_SCHED_PRIORITY_COUNT,
 				   ring->num_hw_submission, 0,
 				   timeout, adev->reset_domain->wq,
@@ -2670,6 +2733,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 				goto init_failed;
 			}
 		}
+
+		r = amdgpu_seq64_init(adev);
+		if (r) {
+			DRM_ERROR("allocate seq64 failed %d\n", r);
+			goto init_failed;
+		}
 	}
 }
@@ -3132,6 +3201,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_mem_scratch_fini(adev);
 			amdgpu_ib_pool_fini(adev);
+			amdgpu_seq64_fini(adev);
 		}
 
 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
@@ -4202,6 +4272,7 @@ fence_driver_init:
 			"Could not create amdgpu board attributes\n");
 
 	amdgpu_fru_sysfs_init(adev);
+	amdgpu_reg_state_sysfs_init(adev);
 
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		r = amdgpu_pmu_init(adev);
@@ -4324,6 +4395,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
 	amdgpu_fru_sysfs_fini(adev);
 
+	amdgpu_reg_state_sysfs_fini(adev);
+
 	/* disable ras feature must before hw fini */
 	amdgpu_ras_pre_fini(adev);
 
@@ -4451,6 +4524,8 @@ int amdgpu_device_prepare(struct drm_device *dev)
 	if (r)
 		goto unprepare;
 
+	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -4954,7 +5029,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 
-		if (!ring || !ring->sched.thread)
+		if (!amdgpu_ring_sched_ready(ring))
 			continue;
 
 		spin_lock(&ring->sched.job_list_lock);
@@ -5093,7 +5168,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 
-		if (!ring || !ring->sched.thread)
+		if (!amdgpu_ring_sched_ready(ring))
 			continue;
 
 		/* Clear job fence from fence drv to avoid force_completion
@@ -5560,7 +5635,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = tmp_adev->rings[i];
 
-			if (!ring || !ring->sched.thread)
+			if (!amdgpu_ring_sched_ready(ring))
 				continue;
 
			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5629,7 +5704,7 @@ skip_hw_reset:
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = tmp_adev->rings[i];
 
-			if (!ring || !ring->sched.thread)
+			if (!amdgpu_ring_sched_ready(ring))
 				continue;
 
 			drm_sched_start(&ring->sched, true);
@@ -5691,6 +5766,39 @@ skip_sched_resume:
 }
 
 /**
+ * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
+ * first physical partner to an AMD dGPU.
+ * This will exclude any virtual switches and links.
+ */
+static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
+					    enum pci_bus_speed *speed,
+					    enum pcie_link_width *width)
+{
+	struct pci_dev *parent = adev->pdev;
+
+	if (!speed || !width)
+		return;
+
+	*speed = PCI_SPEED_UNKNOWN;
+	*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	while ((parent = pci_upstream_bridge(parent))) {
+		/* skip upstream/downstream switches internal to dGPU*/
+		if (parent->vendor == PCI_VENDOR_ID_ATI)
+			continue;
+		*speed = pcie_get_speed_cap(parent);
+		*width = pcie_get_width_cap(parent);
+		break;
+	}
+}
+
+/**
  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
  *
  * @adev: amdgpu_device pointer
@@ -5723,8 +5831,8 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
 		return;
 
-	pcie_bandwidth_available(adev->pdev, NULL,
-				 &platform_speed_cap, &platform_link_width);
+	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
+					&platform_link_width);
 
 	if (adev->pm.pcie_gen_mask == 0) {
 		/* asic caps */
@@ -5951,7 +6059,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 
-		if (!ring || !ring->sched.thread)
+		if (!amdgpu_ring_sched_ready(ring))
 			continue;
 
 		drm_sched_stop(&ring->sched, NULL);
@@ -6001,6 +6109,20 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
 	struct amdgpu_reset_context reset_context;
 	u32 memsize;
 	struct list_head device_list;
+	struct amdgpu_hive_info *hive;
+	int hive_ras_recovery = 0;
+	struct amdgpu_ras *ras;
+
+	/* PCI error slot reset should be skipped during RAS recovery */
+	hive = amdgpu_get_xgmi_hive(adev);
+	if (hive) {
+		hive_ras_recovery = atomic_read(&hive->ras_recovery);
+		amdgpu_put_xgmi_hive(hive);
+	}
+	ras = amdgpu_ras_get_context(adev);
+	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
+	    ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+		return PCI_ERS_RESULT_RECOVERED;
 
 	DRM_INFO("PCI error: slot reset callback!!\n");
 
@@ -6079,7 +6201,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 
-		if (!ring || !ring->sched.thread)
+		if (!amdgpu_ring_sched_ready(ring))
 			continue;
 
 		drm_sched_start(&ring->sched, true);
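The reg_state attribute added above is a binary sysfs file whose read offset selects which register-state partition is returned (XGMI, WAFL, PCIE, USR, USR_1), matching the switch on ppos in amdgpu_sysfs_reg_state_get(). An illustrative userspace read; the device path and the numeric offset (0 is assumed to correspond to AMDGPU_SYS_REG_STATE_XGMI; check amdgpu_reg_state.h for the real values):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	/* example path; the file lives under the GPU's PCI device node */
	int fd = open("/sys/bus/pci/devices/0000:03:00.0/reg_state", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = pread(fd, buf, sizeof(buf), 0); /* offset selects the partition */
	printf("read %zd bytes of XGMI reg state\n", n);
	close(fd);
	return 0;
}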
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e7e87a3b26..decbbe3d4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -42,6 +42,7 @@
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
 #include <linux/pm_runtime.h>
+#include "amdgpu_trace.h"
 
 /**
  * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -63,6 +64,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 		attach->peer2peer = false;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+	trace_amdgpu_runpm_reference_dumps(1, __func__);
 	if (r < 0)
 		goto out;
 
@@ -70,6 +72,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 
 out:
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	trace_amdgpu_runpm_reference_dumps(0, __func__);
 	return r;
 }
 
@@ -90,6 +93,7 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 
 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	trace_amdgpu_runpm_reference_dumps(0, __func__);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 855ab59632..64b1bb2404 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -128,6 +128,7 @@ enum AMDGPU_DEBUG_MASK {
 	AMDGPU_DEBUG_VM = BIT(0),
 	AMDGPU_DEBUG_LARGEBAR = BIT(1),
 	AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
+	AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
 };
 
 unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -209,6 +210,7 @@ int amdgpu_umsch_mm;
 int amdgpu_seamless = -1; /* auto */
 uint amdgpu_debug_mask;
 int amdgpu_agp = -1; /* auto */
+int amdgpu_wbrf = -1;
 int amdgpu_damage_clips = -1; /* auto */
 
 static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -985,6 +987,22 @@ module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
 MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
 module_param_named(agp, amdgpu_agp, int, 0444);
 
+/**
+ * DOC: wbrf (int)
+ * Enable Wifi RFI interference mitigation feature.
+ * Due to electrical and mechanical constraints there may be likely interference of
+ * relatively high-powered harmonics of the (G-)DDR memory clocks with local radio
+ * module frequency bands used by Wifi 6/6e/7. To mitigate the possible RFI interference,
+ * with this feature enabled, PMFW will use either “shadowed P-State” or “P-State” based
+ * on active list of frequencies in-use (to be avoided) as part of initial setting or
+ * P-state transition. However, there may be potential performance impact with this
+ * feature enabled.
+ * (0 = disabled, 1 = enabled, -1 = auto (default setting, will be enabled if supported))
+ */
+MODULE_PARM_DESC(wbrf,
+	"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto (default))");
+module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+
 /* These devices are not supported by amdgpu.
  * They are supported by the mach64, r128, radeon drivers
  */
@@ -2113,6 +2131,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
 		pr_info("debug: soft reset for GPU recovery disabled\n");
 		adev->debug_disable_soft_recovery = true;
 	}
+
+	if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
+		pr_info("debug: place fw in vram for frontdoor loading\n");
+		adev->debug_use_vram_fw_buf = true;
+	}
 }
 
 static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2224,6 +2247,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, ddev);
 
+	amdgpu_init_debug_options(adev);
+
 	ret = amdgpu_driver_load_kms(adev, flags);
 	if (ret)
 		goto err_pci;
@@ -2243,6 +2268,10 @@ retry_init:
 	if (ret)
 		goto err_pci;
 
+	ret = amdgpu_amdkfd_drm_client_create(adev);
+	if (ret)
+		goto err_pci;
+
 	/*
 	 * 1. don't init fbdev on hw without DCE
	 * 2. don't init fbdev if there are no connectors
@@ -2304,8 +2333,6 @@ retry_init:
 			amdgpu_get_secondary_funcs(adev);
 	}
 
-	amdgpu_init_debug_options(adev);
-
 	return 0;
 
 err_pci:
@@ -2424,8 +2451,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
 	}
 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
 		adev = mgpu_info.gpu_ins[i].adev;
-		if (!adev->kfd.init_complete)
+		if (!adev->kfd.init_complete) {
+			kgd2kfd_init_zone_device(adev);
 			amdgpu_amdkfd_device_init(adev);
+			amdgpu_amdkfd_drm_client_create(adev);
+		}
 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 }
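The wbrf parameter documented above is tri-state: 0 forces the WiFi RFI mitigation off, 1 forces it on, and -1 (the default) defers to platform/PMFW support. A sketch of how such a tri-state parameter typically resolves (illustrative helper, not from the patch; the fw_supported flag stands in for whatever capability check the SMU code performs):

/* Sketch only: resolve a -1/0/1 module parameter against fw capability. */
static bool wbrf_enabled(bool fw_supported)
{
	if (amdgpu_wbrf == -1)
		return fw_supported;	/* auto: follow PMFW support */
	return amdgpu_wbrf == 1;
}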
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 5706b282a0..c7df7fa345 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -97,6 +97,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
 		   stats.requested_visible_vram/1024UL);
 	drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
 		   stats.requested_gtt/1024UL);
+	drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
+	drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
+	drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
+
 	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
 		if (!usage[hw_ip])
 			continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index dc23021274..70bff8cecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -183,6 +183,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
 	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
+	trace_amdgpu_runpm_reference_dumps(1, __func__);
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
 		struct dma_fence *old;
@@ -310,6 +311,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
 		dma_fence_put(fence);
 		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		trace_amdgpu_runpm_reference_dumps(0, __func__);
 	} while (last_seq != seq);
 
 	return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 84beeaa4d2..49a5f1c73b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -203,7 +203,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct drm_exec exec;
 	long r;
 
-	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
 		drm_exec_retry_on_contention(&exec);
@@ -739,7 +739,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 
 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-		      DRM_EXEC_IGNORE_DUPLICATES);
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		if (gobj) {
 			r = drm_exec_lock_obj(&exec, gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 57516a8c5d..431ec72655 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -202,8 +202,8 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
 	pr_debug("hmm range: start = 0x%lx, end = 0x%lx",
 		hmm_range->start, hmm_range->end);
 
-	/* Assuming 128MB takes maximum 1 second to fault page address */
-	timeout = max((hmm_range->end - hmm_range->start) >> 27, 1UL);
+	/* Assuming 64MB takes maximum 1 second to fault page address */
+	timeout = max((hmm_range->end - hmm_range->start) >> 26, 1UL);
 	timeout *= HMM_RANGE_DEFAULT_TIMEOUT;
 	timeout = jiffies + msecs_to_jiffies(timeout);
 
@@ -211,6 +211,7 @@ retry:
 	hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
 	r = hmm_range_fault(hmm_range);
 	if (unlikely(r)) {
+		schedule();
 		/*
 		 * FIXME: This timeout should encompass the retry from
 		 * mmu_interval_read_retry() as well.
@@ -224,7 +225,6 @@ retry:
 			break;
 		hmm_range->hmm_pfns += MAX_WALK_BYTE >> PAGE_SHIFT;
 		hmm_range->start = hmm_range->end;
-		schedule();
 	} while (hmm_range->end < end);
 
 	hmm_range->start = start;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 82608df433..d79cb13e1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -175,7 +175,6 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
 
 	i2c->rec = *rec;
 	i2c->adapter.owner = THIS_MODULE;
-	i2c->adapter.class = I2C_CLASS_DDC;
 	i2c->adapter.dev.parent = dev->dev;
 	i2c->dev = dev;
 	i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 1f35719853..71a5cf37b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -115,7 +115,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (!entity)
 		return 0;
 
-	return drm_sched_job_init(&(*job)->base, entity, owner);
+	return drm_sched_job_init(&(*job)->base, entity, 1, owner);
 }
 
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Signal all jobs not yet scheduled */
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 		spin_lock(&rq->lock);
 		list_for_each_entry(s_entity, &rq->entities, list) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 598867919c..bf4f48fe43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1433,6 +1433,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		fpriv->csa_va = NULL;
 	}
 
+	amdgpu_seq64_unmap(adev, fpriv);
+
 	pasid = fpriv->vm.pasid;
 	pd = amdgpu_bo_ref(fpriv->vm.root.bo);
 	if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
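The amdgpu_hmm.c hunk above halves the assumed fault bandwidth from 128MB/s (>> 27) to 64MB/s (>> 26), doubling the timeout budget for a given range. Worked sketch of the arithmetic (HMM_RANGE_DEFAULT_TIMEOUT is 1000 ms in linux/hmm.h; for a 256MB range, 256MB >> 26 = 4, so the deadline becomes jiffies + 4000 ms instead of jiffies + 2000 ms):

#include <linux/hmm.h>
#include <linux/jiffies.h>
#include <linux/minmax.h>

/* Sketch only: same computation as amdgpu_hmm_range_get_pages() above. */
static unsigned long hmm_fault_deadline(unsigned long start, unsigned long end)
{
	unsigned long timeout = max((end - start) >> 26, 1UL);

	return jiffies + msecs_to_jiffies(timeout * HMM_RANGE_DEFAULT_TIMEOUT);
}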
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 061d88f448..59fafb8392 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -218,6 +218,7 @@ static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
 int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
 {
 	struct amdgpu_smuio_mcm_config_info mcm_info;
+	struct ras_err_addr err_addr = {0};
 	struct mca_bank_set mca_set;
 	struct mca_bank_node *node;
 	struct mca_bank_entry *entry;
@@ -246,10 +247,18 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
 		mcm_info.socket_id = entry->info.socket_id;
 		mcm_info.die_id = entry->info.aid;
 
+		if (blk == AMDGPU_RAS_BLOCK__UMC) {
+			err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
+			err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
+			err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
+		}
+
 		if (type == AMDGPU_MCA_ERROR_TYPE_UE)
-			amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count);
+			amdgpu_ras_error_statistic_ue_count(err_data,
+				&mcm_info, &err_addr, (uint64_t)count);
 		else
-			amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count);
+			amdgpu_ras_error_statistic_ce_count(err_data,
+				&mcm_info, &err_addr, (uint64_t)count);
 	}
 
 out_mca_release:
@@ -377,7 +386,7 @@ static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
 	int ret;
 
-	ret = amdgpu_mca_smu_set_debug_mode(adev, val ? true : false);
+	ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
 	if (ret)
 		return ret;
 
@@ -485,7 +494,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n");
 void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
 {
 #if defined(CONFIG_DEBUG_FS)
-	if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6))
+	if (!root || amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6))
 		return;
 
 	debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index e51e8918e6..b399f1b628 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -46,6 +46,8 @@
 #define MCA_REG__STATUS__ERRORCODEEXT(x)	MCA_REG_FIELD(x, 21, 16)
 #define MCA_REG__STATUS__ERRORCODE(x)		MCA_REG_FIELD(x, 15, 0)
 
+#define MCA_REG__MISC0__ERRCNT(x)		MCA_REG_FIELD(x, 43, 32)
+
 #define MCA_REG__SYND__ERRORINFORMATION(x)	MCA_REG_FIELD(x, 17, 0)
 
 enum amdgpu_mca_ip {
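The new MCA_REG__MISC0__ERRCNT() macro extracts bits [43:32] of the MISC0 bank register, i.e. a 12-bit error counter, using the existing MCA_REG_FIELD(x, high, low) helper. An equivalent open-coded sketch of the same field extraction:

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch only: same field as MCA_REG__MISC0__ERRCNT(), bits [43:32]. */
static inline u64 mca_misc0_errcnt(u64 misc0)
{
	return (misc0 >> 32) & GENMASK_ULL(11, 0);
}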
drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_lock_obj(&exec, &ctx_data->meta_data_obj->tbo.base); @@ -1224,7 +1254,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev, struct drm_exec exec; long r; - drm_exec_init(&exec, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_lock_obj(&exec, &ctx_data->meta_data_obj->tbo.base); @@ -1510,3 +1540,34 @@ out: amdgpu_ucode_release(&adev->mes.fw[pipe]); return r; } + +#if defined(CONFIG_DEBUG_FS) + +static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = m->private; + uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr); + + seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4, + mem, PAGE_SIZE, false); + + return 0; +} + + +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log); + +#endif + +void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev) +{ + +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("amdgpu_mes_event_log", 0444, root, + adev, &amdgpu_debugfs_mes_event_log_fops); + +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index c2c88b7723..7d4f93fea9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -133,6 +133,11 @@ struct amdgpu_mes { uint32_t num_mes_dbs; unsigned long *doorbell_bitmap; + /* MES event log buffer */ + struct amdgpu_bo *event_log_gpu_obj; + uint64_t event_log_gpu_addr; + void *event_log_cpu_addr; + /* ip specific functions */ const struct amdgpu_mes_funcs *funcs; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 32fe05c810..2e4911050c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -32,7 +32,6 @@ #include <drm/display/drm_dp_helper.h> #include <drm/drm_crtc.h> -#include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_fixed.h> #include <drm/drm_framebuffer.h> @@ -51,6 +50,7 @@ struct amdgpu_device; struct amdgpu_encoder; struct amdgpu_router; struct amdgpu_hpd; +struct edid; #define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base) #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base) @@ -343,6 +343,97 @@ struct amdgpu_mode_info { int disp_priority; const struct amdgpu_display_funcs *funcs; const enum drm_plane_type *plane_type; + + /* Driver-private color mgmt props */ + + /* @plane_degamma_lut_property: Plane property to set a degamma LUT to + * convert encoded values to light linear values before sampling or + * blending. + */ + struct drm_property *plane_degamma_lut_property; + /* @plane_degamma_lut_size_property: Plane property to define the max + * size of degamma LUT as supported by the driver (read-only). + */ + struct drm_property *plane_degamma_lut_size_property; + /** + * @plane_degamma_tf_property: Plane pre-defined transfer function + * to go from scanout/encoded values to linear values. + */ + struct drm_property *plane_degamma_tf_property; + /** + * @plane_hdr_mult_property: + */ + struct drm_property *plane_hdr_mult_property; + + struct drm_property *plane_ctm_property; + /** + * @shaper_lut_property: Plane property to set pre-blending shaper LUT + * that converts color content before 3D LUT.
If + * plane_shaper_tf_property != Identity TF, AMD color module will + * combine the user LUT values with pre-defined TF into the LUT + * parameters to be programmed. + */ + struct drm_property *plane_shaper_lut_property; + /** + * @shaper_lut_size_property: Plane property for the size of + * pre-blending shaper LUT as supported by the driver (read-only). + */ + struct drm_property *plane_shaper_lut_size_property; + /** + * @plane_shaper_tf_property: Plane property to set a predefined + * transfer function for pre-blending shaper (before applying 3D LUT) + * with or without LUT. There is no shaper ROM, but we can use AMD + * color modules to program LUT parameters from predefined TF (or + * from a combination of pre-defined TF and the custom 1D LUT). + */ + struct drm_property *plane_shaper_tf_property; + /** + * @plane_lut3d_property: Plane property for color transformation using + * a 3D LUT (pre-blending), a three-dimensional array where each + * element is an RGB triplet. Each dimension has the size of + * lut3d_size. The array contains samples from the approximated + * function. On AMD, values between samples are estimated by + * tetrahedral interpolation. The array is accessed with three indices, + * one for each input dimension (color channel), blue being the + * outermost dimension, red the innermost. + */ + struct drm_property *plane_lut3d_property; + /** + * @plane_degamma_lut_size_property: Plane property to define the max + * size of 3D LUT as supported by the driver (read-only). The max size + * is the max size of one dimension and, therefore, the max number of + * entries for 3D LUT array is the 3D LUT size cubed; + */ + struct drm_property *plane_lut3d_size_property; + /** + * @plane_blend_lut_property: Plane property for output gamma before + * blending. Userspace set a blend LUT to convert colors after 3D LUT + * conversion. It works as a post-3DLUT 1D LUT. With shaper LUT, they + * are sandwiching 3D LUT with two 1D LUT. If plane_blend_tf_property + * != Identity TF, AMD color module will combine the user LUT values + * with pre-defined TF into the LUT parameters to be programmed. + */ + struct drm_property *plane_blend_lut_property; + /** + * @plane_blend_lut_size_property: Plane property to define the max + * size of blend LUT as supported by the driver (read-only). + */ + struct drm_property *plane_blend_lut_size_property; + /** + * @plane_blend_tf_property: Plane property to set a predefined + * transfer function for pre-blending blend/out_gamma (after applying + * 3D LUT) with or without LUT. There is no blend ROM, but we can use + * AMD color modules to program LUT parameters from predefined TF (or + * from a combination of pre-defined TF and the custom 1D LUT). + */ + struct drm_property *plane_blend_tf_property; + /* @regamma_tf_property: Transfer function for CRTC regamma + * (post-blending). Possible values are defined by `enum + * amdgpu_transfer_function`. There is no regamma ROM, but we can use + * AMD color modules to program LUT parameters from predefined TF (or + * from a combination of pre-defined TF and the custom 1D LUT). 
+ */ + struct drm_property *regamma_tf_property; }; #define AMDGPU_MAX_BL_LEVEL 0xFF @@ -416,6 +507,10 @@ struct amdgpu_crtc { int otg_inst; struct drm_pending_vblank_event *event; + + bool wb_pending; + bool wb_enabled; + struct drm_writeback_connector *wb_conn; }; struct amdgpu_encoder_atom_dig { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 425cebcc5c..866bfde1ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -620,8 +620,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, return r; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - bo->tbo.resource->mem_type == TTM_PL_VRAM && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else @@ -1275,26 +1274,39 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict) void amdgpu_bo_get_memory(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_resource *res = bo->tbo.resource; uint64_t size = amdgpu_bo_size(bo); + struct drm_gem_object *obj; unsigned int domain; + bool shared; /* Abort if the BO doesn't currently have a backing store */ - if (!bo->tbo.resource) + if (!res) return; - domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + obj = &bo->tbo.base; + shared = drm_gem_object_is_shared_for_memory_stats(obj); + + domain = amdgpu_mem_type_to_domain(res->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: stats->vram += size; - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) stats->visible_vram += size; + if (shared) + stats->vram_shared += size; break; case AMDGPU_GEM_DOMAIN_GTT: stats->gtt += size; + if (shared) + stats->gtt_shared += size; break; case AMDGPU_GEM_DOMAIN_CPU: default: stats->cpu += size; + if (shared) + stats->cpu_shared += size; break; } @@ -1381,10 +1393,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* Remember that this BO was accessed by the CPU */ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - if (bo->resource->mem_type != TTM_PL_VRAM) - return 0; - - if (amdgpu_bo_in_cpu_visible_vram(abo)) + if (amdgpu_res_cpu_visible(adev, bo->resource)) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -1408,7 +1417,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* this should never happen */ if (bo->resource->mem_type == TTM_PL_VRAM && - !amdgpu_bo_in_cpu_visible_vram(abo)) + !amdgpu_res_cpu_visible(adev, bo->resource)) return VM_FAULT_SIGBUS; ttm_bo_move_to_lru_tail_unlocked(bo); @@ -1572,6 +1581,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, */ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct dma_buf_attachment *attachment; struct dma_buf *dma_buf; const char *placement; @@ -1580,10 +1590,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) if (dma_resv_trylock(bo->tbo.base.resv)) { unsigned int domain; + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) placement = "VRAM VISIBLE"; else placement = "VRAM"; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index a3ea8a82db..fa03d9e487 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -138,12 +138,18 @@ struct amdgpu_bo_vm { struct amdgpu_mem_stats { /* current VRAM usage, includes visible VRAM */ uint64_t vram; + /* current shared VRAM usage, includes visible VRAM */ + uint64_t vram_shared; /* current visible VRAM usage */ uint64_t visible_vram; /* current GTT usage */ uint64_t gtt; + /* current shared GTT usage */ + uint64_t gtt_shared; /* current system memory usage */ uint64_t cpu; + /* current shared system memory usage */ + uint64_t cpu_shared; /* sum of evicted buffers, includes visible VRAM */ uint64_t evicted_vram; /* sum of evicted buffers due to CPU access */ @@ -245,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) } /** - * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM - */ -static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct amdgpu_res_cursor cursor; - - if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) - return false; - - amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); - while (cursor.remaining) { - if (cursor.start < adev->gmc.visible_vram_size) - return true; - - amdgpu_res_next(&cursor, cursor.size); - } - - return false; -} - -/** * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced */ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index a21045d018..0328616473 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -466,7 +466,7 @@ static int psp_sw_init(void *handle) } ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, - amdgpu_sriov_vf(adev) ? + (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, &psp->fw_pri_bo, &psp->fw_pri_mc_addr, @@ -1433,8 +1433,8 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, get_extended_data) || amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6); - bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & - EXTEND_PEER_LINK_INFO_CMD_FLAG; + bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : + psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; /* populate the shared output buffer rather than the cmd input buffer * with node_ids as the input for GET_PEER_LINKS command execution. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 4a3726bb6d..31823a30de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -305,11 +305,13 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return -EINVAL; data->head.block = block_id; - /* only ue and ce errors are supported */ + /* only ue, ce and poison errors are supported */ if (!memcmp("ue", err, 2)) data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; else if (!memcmp("ce", err, 2)) data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; + else if (!memcmp("poison", err, 6)) + data->head.type = AMDGPU_RAS_ERROR__POISON; else return -EINVAL; @@ -431,9 +433,10 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev, * The block is one of: umc, sdma, gfx, etc.
* see ras_block_string[] for details * - * The error type is one of: ue, ce, where, + * The error type is one of: ue, ce and poison, where * ue is multi-uncorrectable * ce is single-correctable + * poison is poison * * The sub-block is the sub-block index, pass 0 if there is no sub-block. * The address and value are hexadecimal numbers, leading 0x is optional. @@ -1067,8 +1070,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, mcm_info = &err_info->mcm_info; if (err_info->ce_count) { dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new correctable hardware errors detected in %s block, " - "no user action is needed\n", + "%lld new correctable hardware errors detected in %s block\n", mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, @@ -1080,8 +1082,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; dev_info(adev->dev, "socket: %d, die: %d, " - "%lld correctable hardware errors detected in total in %s block, " - "no user action is needed\n", + "%lld correctable hardware errors detected in total in %s block\n", mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name); } } @@ -1108,16 +1109,14 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, adev->smuio.funcs->get_die_id) { dev_info(adev->dev, "socket: %d, die: %d " "%ld correctable hardware errors " - "detected in %s block, no user " - "action is needed.\n", + "detected in %s block\n", adev->smuio.funcs->get_socket_id(adev), adev->smuio.funcs->get_die_id(adev), ras_mgr->err_data.ce_count, blk_name); } else { dev_info(adev->dev, "%ld correctable hardware errors " - "detected in %s block, no user " - "action is needed.\n", + "detected in %s block\n", ras_mgr->err_data.ce_count, blk_name); } @@ -1156,8 +1155,10 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; - amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count); - amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count); + amdgpu_ras_error_statistic_ce_count(&obj->err_data, + &err_info->mcm_info, NULL, err_info->ce_count); + amdgpu_ras_error_statistic_ue_count(&obj->err_data, + &err_info->mcm_info, NULL, err_info->ue_count); } } else { /* for legacy asic path which doesn't have error source info */ @@ -1918,7 +1919,7 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj, struct amdgpu_iv_entry *entry) { dev_info(obj->adev->dev, - "Poison is created, no user action is needed.\n"); + "Poison is created\n"); } static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, @@ -2541,7 +2542,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) return 0; data = &con->eh_data; - *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); + *data = kzalloc(sizeof(**data), GFP_KERNEL); if (!*data) { ret = -ENOMEM; goto out; } @@ -2828,10 +2829,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (con) return 0; - con = kmalloc(sizeof(struct amdgpu_ras) + + con = kzalloc(sizeof(*con) + sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, - GFP_KERNEL|__GFP_ZERO); + GFP_KERNEL); if (!con) return -ENOMEM; @@ -2918,6 +2919,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev) amdgpu_ras_query_poison_mode(adev); + /* Pack the socket_id into ras feature mask
bits[31:29] */ + if (adev->smuio.funcs && + adev->smuio.funcs->get_socket_id) + con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29); + /* Get RAS schema for particular SOC */ con->schema = amdgpu_get_ras_schema(adev); @@ -3136,6 +3142,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + amdgpu_ras_set_mca_debug_mode(adev, false); + list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { if (!node->ras_obj) { dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); @@ -3409,12 +3417,18 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) return 0; } -void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) +int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret = 0; - if (con) - con->is_mca_debug_mode = enable; + if (con) { + ret = amdgpu_mca_smu_set_debug_mode(adev, enable); + if (!ret) + con->is_mca_debug_mode = enable; + } + + return ret; } bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev) @@ -3685,7 +3699,8 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct } static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info) + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr) { struct ras_err_node *err_node; @@ -3699,6 +3714,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info)); + if (err_addr) + memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr)); + err_data->err_list_count++; list_add_tail(&err_node->node, &err_data->err_node_list); list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp); @@ -3707,7 +3725,8 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d } int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count) + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count) { struct ras_err_info *err_info; @@ -3717,7 +3736,7 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, if (!count) return 0; - err_info = amdgpu_ras_error_get_info(err_data, mcm_info); + err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr); if (!err_info) return -EINVAL; @@ -3728,7 +3747,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, } int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count) + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count) { struct ras_err_info *err_info; @@ -3738,7 +3758,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, if (!count) return 0; - err_info = amdgpu_ras_error_get_info(err_data, mcm_info); + err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr); if (!err_info) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 19161916ac..76fb856287 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -452,10 +452,17 @@ struct ras_fs_data { char debugfs_name[32]; }; +struct ras_err_addr { + uint64_t err_status; + uint64_t err_ipid; + uint64_t err_addr; +}; + struct ras_err_info { struct 
amdgpu_smuio_mcm_config_info mcm_info; u64 ce_count; u64 ue_count; + struct ras_err_addr err_addr; }; struct ras_err_node { @@ -773,7 +780,7 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev); int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con); -void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable); +int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable); bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev); bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, unsigned int *mode); @@ -806,8 +813,10 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, int amdgpu_ras_error_data_init(struct ras_err_data *err_data); void amdgpu_ras_error_data_fini(struct ras_err_data *err_data); int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count); + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count); int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count); + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 4e526d7730..06f0a6534a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -647,6 +647,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring) ring->name); ring->sched.ready = !r; + return r; } @@ -729,3 +730,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring) if (ring->is_sw_ring) amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE); } + +bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring) +{ + if (!ring) + return false; + + if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched)) + return false; + + return true; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index bbb53720a0..fe1a61eb6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); - +bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index 35e0ae9aca..2c3675d916 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -531,13 +531,12 @@ int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev, if (version_major == 2 && version_minor == 1) adev->gfx.rlc.is_rlc_v2_1 = true; - if (version_minor >= 0) { - err = amdgpu_gfx_rlc_init_microcode_v2_0(adev); - if (err) { - dev_err(adev->dev, "fail to init rlc v2_0 microcode\n"); - return err; - } + err = amdgpu_gfx_rlc_init_microcode_v2_0(adev); + if (err) { + dev_err(adev->dev, "fail to init rlc v2_0 microcode\n"); + return err; } + if (version_minor >= 1) amdgpu_gfx_rlc_init_microcode_v2_1(adev); if (version_minor >= 2) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c new file mode 100644 index 0000000000..7a6a672754 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c @@ -0,0 +1,247 @@ +// 
SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_seq64.h" + +#include <drm/drm_exec.h> + +/** + * DOC: amdgpu_seq64 + * + * amdgpu_seq64 hands out 64-bit memory slots in sequence order, one per + * request. The seq64 driver is required for user queue fence memory + * allocation, TLB counters and VM updates. It provides at most 32768 + * 64-bit slots. + */ + +/** + * amdgpu_seq64_map - Map the seq64 memory to VM + * + * @adev: amdgpu_device pointer + * @vm: vm pointer + * @bo_va: bo_va pointer + * @seq64_addr: seq64 vaddr start address + * @size: seq64 pool size + * + * Map the seq64 memory to the given VM. + * + * Returns: + * 0 on success or a negative error code on failure + */ +int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va, u64 seq64_addr, + uint32_t size) +{ + struct amdgpu_bo *bo; + struct drm_exec exec; + int r; + + bo = adev->seq64.sbo; + if (!bo) + return -EINVAL; + + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_until_all_locked(&exec) { + r = amdgpu_vm_lock_pd(vm, &exec, 0); + if (likely(!r)) + r = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error; + } + + *bo_va = amdgpu_vm_bo_add(adev, vm, bo); + if (!*bo_va) { + r = -ENOMEM; + goto error; + } + + r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size, + AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | + AMDGPU_PTE_EXECUTABLE); + if (r) { + DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r); + amdgpu_vm_bo_del(adev, *bo_va); + goto error; + } + + r = amdgpu_vm_bo_update(adev, *bo_va, false); + if (r) { + DRM_ERROR("failed to do vm_bo_update on userq sem\n"); + amdgpu_vm_bo_del(adev, *bo_va); + goto error; + } + +error: + drm_exec_fini(&exec); + return r; +} + +/** + * amdgpu_seq64_unmap - Unmap the seq64 memory + * + * @adev: amdgpu_device pointer + * @fpriv: DRM file private + * + * Unmap the seq64 memory from the given VM.
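+ * + * This is called from amdgpu_driver_postclose_kms() when the DRM file is + * closed, so the per-file seq64 mapping does not outlive its VM.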
+ */ +void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv) +{ + struct amdgpu_vm *vm; + struct amdgpu_bo *bo; + struct drm_exec exec; + int r; + + if (!fpriv->seq64_va) + return; + + bo = adev->seq64.sbo; + if (!bo) + return; + + vm = &fpriv->vm; + + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_until_all_locked(&exec) { + r = amdgpu_vm_lock_pd(vm, &exec, 0); + if (likely(!r)) + r = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error; + } + + amdgpu_vm_bo_del(adev, fpriv->seq64_va); + + fpriv->seq64_va = NULL; + +error: + drm_exec_fini(&exec); +} + +/** + * amdgpu_seq64_alloc - Allocate a 64-bit memory slot + * + * @adev: amdgpu_device pointer + * @gpu_addr: allocated gpu VA start address + * @cpu_addr: allocated cpu VA start address + * + * Allocate a 64-bit slot from the seq64 pool. + * + * Returns: + * 0 on success or a negative error code on failure + */ +int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, + u64 **cpu_addr) +{ + unsigned long bit_pos; + u32 offset; + + bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem); + + if (bit_pos < adev->seq64.num_sem) { + __set_bit(bit_pos, adev->seq64.used); + offset = bit_pos << 6; /* convert to qw offset */ + } else { + return -EINVAL; + } + + *gpu_addr = offset + AMDGPU_SEQ64_VADDR_START; + *cpu_addr = offset + adev->seq64.cpu_base_addr; + + return 0; +} + +/** + * amdgpu_seq64_free - Free the given 64-bit memory slot + * + * @adev: amdgpu_device pointer + * @gpu_addr: gpu start address to be freed + * + * Free the given 64-bit slot back to the seq64 pool. + * + */ +void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr) +{ + u32 offset; + + offset = gpu_addr - AMDGPU_SEQ64_VADDR_START; + + offset >>= 6; + if (offset < adev->seq64.num_sem) + __clear_bit(offset, adev->seq64.used); +} + +/** + * amdgpu_seq64_fini - Cleanup seq64 driver + * + * @adev: amdgpu_device pointer + * + * Free the memory space allocated for seq64. + * + */ +void amdgpu_seq64_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->seq64.sbo, + NULL, + (void **)&adev->seq64.cpu_base_addr); +} + +/** + * amdgpu_seq64_init - Initialize seq64 driver + * + * @adev: amdgpu_device pointer + * + * Allocate the required memory space for seq64. + * + * Returns: + * 0 on success or a negative error code on failure + */ +int amdgpu_seq64_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->seq64.sbo) + return 0; + + /* + * AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_SEQ64_SIZE bytes, + * backing AMDGPU_MAX_SEQ64_SLOTS 64-bit slots + */ + r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->seq64.sbo, NULL, + (void **)&adev->seq64.cpu_base_addr); + if (r) { + dev_warn(adev->dev, "(%d) create seq64 failed\n", r); + return r; + } + + memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE); + + adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS; + memset(&adev->seq64.used, 0, sizeof(adev->seq64.used)); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h new file mode 100644 index 0000000000..2196e72be5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_SEQ64_H__ +#define __AMDGPU_SEQ64_H__ + +#define AMDGPU_SEQ64_SIZE (2ULL << 20) +#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8)) +#define AMDGPU_SEQ64_VADDR_OFFSET 0x50000 +#define AMDGPU_SEQ64_VADDR_START (AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET) + +struct amdgpu_seq64 { + struct amdgpu_bo *sbo; + u32 num_sem; + u64 *cpu_base_addr; + DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS); +}; + +void amdgpu_seq64_fini(struct amdgpu_device *adev); +int amdgpu_seq64_init(struct amdgpu_device *adev); +int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr); +void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr); +int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size); +void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv); + +#endif + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 2fd1bfb359..f539b1d002 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -554,6 +554,21 @@ TRACE_EVENT(amdgpu_reset_reg_dumps, __entry->value) ); +TRACE_EVENT(amdgpu_runpm_reference_dumps, + TP_PROTO(uint32_t index, const char *func), + TP_ARGS(index, func), + TP_STRUCT__entry( + __field(uint32_t, index) + __string(func, func) + ), + TP_fast_assign( + __entry->index = index; + __assign_str(func, func); + ), + TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n", + __entry->index, + __get_str(func)) +); #undef AMDGPU_JOB_GET_TIMELINE_NAME #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b0ed10f4de..851509c6e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -137,7 +137,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && - amdgpu_bo_in_cpu_visible_vram(abo)) { + amdgpu_res_cpu_visible(adev, bo->resource)) { /* Try evicting to the CPU inaccessible part of VRAM * first, but only set GTT as busy placement, so this @@ -408,40 +408,55 @@ error: return r; } -/* - * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy +/** + * 
amdgpu_res_cpu_visible - Check that resource can be accessed by CPU + * @adev: amdgpu device + * @res: the resource to check * - * Called by amdgpu_bo_move() + * Returns: true if the full resource is CPU visible, false otherwise. */ -static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_resource *mem) +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res) { - u64 mem_size = (u64)mem->size; struct amdgpu_res_cursor cursor; - u64 end; - if (mem->mem_type == TTM_PL_SYSTEM || - mem->mem_type == TTM_PL_TT) + if (!res) + return false; + + if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || + res->mem_type == AMDGPU_PL_PREEMPT) return true; - if (mem->mem_type != TTM_PL_VRAM) + + if (res->mem_type != TTM_PL_VRAM) return false; - amdgpu_res_first(mem, 0, mem_size, &cursor); - end = cursor.start + cursor.size; + amdgpu_res_first(res, 0, res->size, &cursor); while (cursor.remaining) { + if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size) + return false; amdgpu_res_next(&cursor, cursor.size); + } - if (!cursor.remaining) - break; + return true; +} - /* ttm_resource_ioremap only supports contiguous memory */ - if (end != cursor.start) - return false; +/* + * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy + * + * Called by amdgpu_bo_move() + */ +static bool amdgpu_res_copyable(struct amdgpu_device *adev, + struct ttm_resource *mem) +{ + if (!amdgpu_res_cpu_visible(adev, mem)) + return false; - end = cursor.start + cursor.size; - } + /* ttm_resource_ioremap only supports contiguous memory */ + if (mem->mem_type == TTM_PL_VRAM && + !(mem->placement & TTM_PL_FLAG_CONTIGUOUS)) + return false; - return end <= adev->gmc.visible_vram_size; + return true; } /* @@ -534,8 +549,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (r) { /* Check that all memory is CPU accessible */ - if (!amdgpu_mem_visible(adev, old_mem) || - !amdgpu_mem_visible(adev, new_mem)) { + if (!amdgpu_res_copyable(adev, old_mem) || + !amdgpu_res_copyable(adev, new_mem)) { pr_err("Move buffer fallback to memcpy unavailable\n"); return r; } @@ -562,7 +577,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - size_t bus_size = (size_t)mem->size; switch (mem->mem_type) { case TTM_PL_SYSTEM: @@ -573,9 +587,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; - /* check if it's visible */ - if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) - return -EINVAL; if (adev->mman.aper_base_kaddr && mem->placement & TTM_PL_FLAG_CONTIGUOUS) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 65ec82141a..32cf6b6f6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res); + int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 0efb2568cb..3e12763e47 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -1062,7 +1062,8 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev) { if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) { amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE, - amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, + (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? + AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, &adev->firmware.fw_buf, &adev->firmware.fw_buf_mc, &adev->firmware.fw_buf_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index ca45ba8ac1..1b5ef32108 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -86,7 +86,7 @@ static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, amdgpu_sync_create(&sync); - drm_exec_init(&exec, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_lock_obj(&exec, &bo->tbo.base); drm_exec_retry_on_contention(&exec); @@ -149,7 +149,7 @@ static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_exec exec; long r; - drm_exec_init(&exec, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_lock_obj(&exec, &bo->tbo.base); drm_exec_retry_on_contention(&exec); @@ -766,6 +766,9 @@ static int umsch_mm_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend) + return 0; + return umsch_mm_test(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3a632c3b1a..0dcff2889e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -1099,7 +1099,8 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev) { bool xnack_mode = true; - if (amdgpu_sriov_vf(adev) && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + if (amdgpu_sriov_vf(adev) && + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) xnack_mode = false; return xnack_mode; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index db6fc0cb18..453a4b786c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ #include <drm/drm_atomic_helper.h> +#include <drm/drm_edid.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5baefb548a..5b50f73591 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1439,6 +1439,51 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, } /** + * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM + * + * @adev: amdgpu_device pointer + * @vm: requested vm + * @flush_type: flush type + * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush. + * + * Flush TLB if needed for a compute VM. + * + * Returns: + * 0 for success. 
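+ * Otherwise, a negative error code returned by amdgpu_gmc_flush_gpu_tlb_pasid().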
+ */ +int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint32_t flush_type, + uint32_t xcc_mask) +{ + uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); + bool all_hub = false; + int xcc = 0, r = 0; + + WARN_ON_ONCE(!vm->is_compute_context); + + /* + * It can be that we race and lose here, but that is extremely unlikely + * and the worst thing which could happen is that we flush the changes + * into the TLB once more which is harmless. + */ + if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq) + return 0; + + if (adev->family == AMDGPU_FAMILY_AI || + adev->family == AMDGPU_FAMILY_RV) + all_hub = true; + + for_each_inst(xcc, xcc_mask) { + r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type, + all_hub, xcc); + if (r) + break; + } + return r; +} + +/** * amdgpu_vm_bo_add - add a bo to a specific vm * * @adev: amdgpu_device pointer @@ -1514,6 +1559,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, trace_amdgpu_vm_bo_map(bo_va, mapping); } +/* Validate operation parameters to prevent potential abuse */ +static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev, + struct amdgpu_bo *bo, + uint64_t saddr, + uint64_t offset, + uint64_t size) +{ + uint64_t tmp, lpfn; + + if (saddr & AMDGPU_GPU_PAGE_MASK + || offset & AMDGPU_GPU_PAGE_MASK + || size & AMDGPU_GPU_PAGE_MASK) + return -EINVAL; + + if (check_add_overflow(saddr, size, &tmp) + || check_add_overflow(offset, size, &tmp) + || size == 0 /* which also leads to end < begin */) + return -EINVAL; + + /* make sure object fit at this offset */ + if (bo && offset + size > amdgpu_bo_size(bo)) + return -EINVAL; + + /* Ensure last pfn not exceed max_pfn */ + lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT; + if (lpfn >= adev->vm_manager.max_pfn) + return -EINVAL; + + return 0; +} + /** * amdgpu_vm_bo_map - map bo inside a vm * @@ -1540,21 +1616,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; + int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); if (tmp) { @@ -1607,17 +1676,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t eaddr; int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; /* Allocate all the needed memory */ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); @@ -1631,7 +1692,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, } saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr 
= saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; mapping->start = saddr; mapping->last = eaddr; @@ -1718,10 +1779,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; LIST_HEAD(removed); uint64_t eaddr; + int r; + + r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size); + if (r) + return r; - eaddr = saddr + size - 1; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; /* Allocate all the needed memory */ before = kzalloc(sizeof(*before), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 2cd86d2bf7..4740dd65b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -116,7 +116,7 @@ struct amdgpu_mem_stats; #define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 -/* Reserve 4MB VRAM for page tables */ +/* How much VRAM to reserve for page tables */ #define AMDGPU_VM_RESERVED_VRAM (8ULL << 20) /* @@ -324,6 +324,7 @@ struct amdgpu_vm { /* Last finished delayed update */ atomic64_t tlb_seq; struct dma_fence *last_tlb_flush; + atomic64_t kfd_last_flushed_seq; /* How many times we had to re-generate the page tables */ uint64_t generation; @@ -445,6 +446,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); +int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint32_t flush_type, + uint32_t xcc_mask); void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, struct amdgpu_vm *vm, struct amdgpu_bo *bo); int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index e81579708e..ad44012cc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_ucode.h" #include "amdgpu_vpe.h" +#include "amdgpu_smu.h" #include "soc15_common.h" #include "vpe_v6_1.h" @@ -33,8 +34,180 @@ /* VPE CSA resides in the 4th page of CSA */ #define AMDGPU_CSA_VPE_OFFSET (4096 * 3) +/* 1 second timeout */ +#define VPE_IDLE_TIMEOUT msecs_to_jiffies(1000) + +#define VPE_MAX_DPM_LEVEL 4 +#define FIXED1_8_BITS_PER_FRACTIONAL_PART 8 +#define GET_PRATIO_INTEGER_PART(x) ((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART) + static void vpe_set_ring_funcs(struct amdgpu_device *adev); +static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +static inline uint16_t complete_integer_division_u16( + uint16_t dividend, + uint16_t divisor, + uint16_t *remainder) +{ + return div16_u16_rem(dividend, divisor, (uint16_t *)remainder); +} + +static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator) +{ + u16 arg1_value = numerator; + u16 arg2_value = denominator; + + uint16_t remainder; + + /* determine integer part */ + uint16_t res_value = complete_integer_division_u16( + arg1_value, arg2_value, &remainder); + + if (res_value > 127 /* CHAR_MAX */) + return 0; + + /* determine fractional part */ + { + unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART; + + do { + remainder <<= 1; + + res_value <<= 1; + + if (remainder >= arg2_value) { + res_value |= 1; + remainder -= arg2_value; + } + } while (--i != 0); + } + 
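+ /* res_value now holds the integer part in the bits above the 8-bit fraction */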
+ /* round up LSB */ + { + uint16_t summand = (remainder << 1) >= arg2_value; + + if ((res_value + summand) > 32767 /* SHRT_MAX */) + return 0; + + res_value += summand; + } + + return res_value; +} + +static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency) +{ + uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency); + + if (GET_PRATIO_INTEGER_PART(pratio) > 1) + pratio = 0; + + return pratio; +} + +/* + * VPE has 4 DPM levels from level 0 (lowest) to 3 (highest), + * VPE FW will dynamically decide which level should be used according to the current load. + * + * Get VPE and SOC clocks from PM, select the appropriate four clock values, + * and calculate the ratios for adjusting from one clock to another. + * The VPE FW can then request the appropriate frequency from the PMFW. + */ +int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe) +{ + struct amdgpu_device *adev = vpe->ring.adev; + uint32_t dpm_ctl; + + if (adev->pm.dpm_enabled) { + struct dpm_clocks clock_table = { 0 }; + struct dpm_clock *VPEClks; + struct dpm_clock *SOCClks; + uint32_t idx; + uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0; + uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0; + + dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); + dpm_ctl |= 1; /* DPM enablement */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); + + /* Get VPECLK and SOCCLK */ + if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) { + dev_dbg(adev->dev, "%s: get clock failed!\n", __func__); + goto disable_dpm; + } + + SOCClks = clock_table.SocClocks; + VPEClks = clock_table.VPEClocks; + + /* vpe dpm only cares about 4 levels. */ + for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) { + uint32_t soc_dpm_level; + uint32_t min_freq; + + if (idx == 0) + soc_dpm_level = 0; + else + soc_dpm_level = (idx * 2) + 1; + + /* clamp the max level */ + if (soc_dpm_level > PP_SMU_NUM_VPECLK_DPM_LEVELS - 1) + soc_dpm_level = PP_SMU_NUM_VPECLK_DPM_LEVELS - 1; + + min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ?
+ SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq; + + switch (idx) { + case 0: + pratio_vmin_freq = min_freq; + break; + case 1: + pratio_vmid_freq = min_freq; + break; + case 2: + pratio_vnorm_freq = min_freq; + break; + case 3: + pratio_vmax_freq = min_freq; + break; + default: + break; + } + } + + if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) { + uint32_t pratio_ctl; + + pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq); + pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq); + pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq); + + pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18); + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */ + dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__); + } else { + dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__); + goto disable_dpm; + } + } + return 0; + +disable_dpm: + dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); + dpm_ctl &= 0xfffffffe; /* Disable DPM */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); + dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__); + return 0; +} + int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev) { struct amdgpu_firmware_info ucode = { @@ -134,6 +307,19 @@ static int vpe_early_init(void *handle) return 0; } +static void vpe_idle_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, vpe.idle_work.work); + unsigned int fences = 0; + + fences += amdgpu_fence_count_emitted(&adev->vpe.ring); + + if (fences == 0) + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); + else + schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); +} static int vpe_common_init(struct amdgpu_vpe *vpe) { @@ -150,6 +336,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe) return r; } + vpe->context_started = false; + INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler); + return 0; } @@ -201,6 +390,12 @@ static int vpe_hw_init(void *handle) struct amdgpu_vpe *vpe = &adev->vpe; int ret; + /* Power on VPE */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, + AMD_PG_STATE_UNGATE); + if (ret) + return ret; + ret = vpe_load_microcode(vpe); if (ret) return ret; @@ -219,6 +414,9 @@ static int vpe_hw_fini(void *handle) vpe_ring_stop(vpe); + /* Power off VPE */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); + return 0; } @@ -226,6 +424,8 @@ static int vpe_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vpe.idle_work); + return vpe_hw_fini(adev); } @@ -430,6 +630,21 @@ static int vpe_set_clockgating_state(void *handle, static int vpe_set_powergating_state(void *handle, enum amd_powergating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_vpe *vpe = &adev->vpe; + + if 
(!adev->pm.dpm_enabled) + dev_err(adev->dev, "Without PM, cannot support powergating\n"); + + dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE":"UNGATE"); + + if (state == AMD_PG_STATE_GATE) { + amdgpu_dpm_enable_vpe(adev, false); + vpe->context_started = false; + } else { + amdgpu_dpm_enable_vpe(adev, true); + } + return 0; } @@ -595,6 +810,38 @@ err0: return ret; } +static void vpe_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vpe *vpe = &adev->vpe; + + cancel_delayed_work_sync(&adev->vpe.idle_work); + + /* Power on VPE and notify VPE of new context */ + if (!vpe->context_started) { + uint32_t context_notify; + + /* Power on VPE */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE); + + /* Indicates that a job from a new context has been submitted. */ + context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator)); + if ((context_notify & 0x1) == 0) + context_notify |= 0x1; + else + context_notify &= ~(0x1); + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify); + vpe->context_started = true; + } +} + +static void vpe_ring_end_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); +} + static const struct amdgpu_ring_funcs vpe_ring_funcs = { .type = AMDGPU_RING_TYPE_VPE, .align_mask = 0xf, @@ -625,6 +872,8 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = { .init_cond_exec = vpe_ring_init_cond_exec, .patch_cond_exec = vpe_ring_patch_cond_exec, .preempt_ib = vpe_ring_preempt_ib, + .begin_use = vpe_ring_begin_use, + .end_use = vpe_ring_end_use, }; static void vpe_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h index 29d56f7ae4..1153ddaea6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h @@ -47,6 +47,15 @@ struct vpe_regs { uint32_t queue0_rb_wptr_lo; uint32_t queue0_rb_wptr_hi; uint32_t queue0_preempt; + + uint32_t dpm_enable; + uint32_t dpm_pratio; + uint32_t dpm_request_interval; + uint32_t dpm_decision_threshold; + uint32_t dpm_busy_clamp_threshold; + uint32_t dpm_idle_clamp_threshold; + uint32_t dpm_request_lv; + uint32_t context_indicator; }; struct amdgpu_vpe { @@ -63,12 +72,15 @@ struct amdgpu_vpe { struct amdgpu_bo *cmdbuf_obj; uint64_t cmdbuf_gpu_addr; uint32_t *cmdbuf_cpu_addr; + struct delayed_work idle_work; + bool context_started; }; int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev); int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe); +int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe); #define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0) #define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? 
(vpe)->funcs->ring_start((vpe)) : 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index bd20cb3b98..a6c88f2fe6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -413,6 +413,42 @@ static ssize_t amdgpu_xgmi_show_num_links(struct device *dev, return sysfs_emit(buf, "%s\n", buf); } +static ssize_t amdgpu_xgmi_show_connected_port_num(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; + int i, j, size = 0; + int current_node; + /* + * Get the node id in the sysfs for the current socket and show + * it in the port num info output in the sysfs for easy reading. + * It is NOT the one retrieved from xgmi ta. + */ + for (i = 0; i < top->num_nodes; i++) { + if (top->nodes[i].node_id == adev->gmc.xgmi.node_id) { + current_node = i; + break; + } + } + + /* avoid using current_node uninitialized if this socket is absent */ + if (i == top->num_nodes) + return -EINVAL; + + for (i = 0; i < top->num_nodes; i++) { + for (j = 0; j < top->nodes[i].num_links; j++) + /* node id in sysfs starts from 1 rather than 0 so +1 here */ + size += sysfs_emit_at(buf, size, "%02x:%02x -> %02x:%02x\n", current_node + 1, + top->nodes[i].port_num[j].src_xgmi_port_num, i + 1, + top->nodes[i].port_num[j].dst_xgmi_port_num); + } + + return size; +} + #define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801) static ssize_t amdgpu_xgmi_show_error(struct device *dev, struct device_attribute *attr, @@ -452,6 +488,7 @@ static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL); static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL); static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL); static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL); +static DEVICE_ATTR(xgmi_port_num, S_IRUGO, amdgpu_xgmi_show_connected_port_num, NULL); static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, struct amdgpu_hive_info *hive) @@ -487,6 +524,13 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, if (ret) pr_err("failed to create xgmi_num_links\n"); + /* Create xgmi port num file if supported */ + if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) { + ret = device_create_file(adev->dev, &dev_attr_xgmi_port_num); + if (ret) + dev_err(adev->dev, "failed to create xgmi_port_num\n"); + } + /* Create sysfs link to hive info folder on the first device */ if (hive->kobj.parent != (&adev->dev->kobj)) { ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj, @@ -517,6 +561,8 @@ remove_file: device_remove_file(adev->dev, &dev_attr_xgmi_error); device_remove_file(adev->dev, &dev_attr_xgmi_num_hops); device_remove_file(adev->dev, &dev_attr_xgmi_num_links); + if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) + device_remove_file(adev->dev, &dev_attr_xgmi_port_num); success: return ret; @@ -533,6 +579,8 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev, device_remove_file(adev->dev, &dev_attr_xgmi_error); device_remove_file(adev->dev, &dev_attr_xgmi_num_hops); device_remove_file(adev->dev, &dev_attr_xgmi_num_links); + if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) + device_remove_file(adev->dev, &dev_attr_xgmi_port_num); if (hive->kobj.parent != (&adev->dev->kobj)) sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info"); @@ -779,6 +827,28 @@ static int
amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf return 0; } +static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev) +{ + struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info; + struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info; + + for (int i = 0; i < peer_info->num_nodes; i++) { + if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) { + for (int j = 0; j < top_info->num_nodes; j++) { + if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) { + peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops; + peer_info->nodes[i].is_sharing_enabled = + top_info->nodes[j].is_sharing_enabled; + peer_info->nodes[i].num_links = + top_info->nodes[j].num_links; + return; + } + } + } + } +} + int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { struct psp_xgmi_topology_info *top_info; @@ -853,18 +923,38 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) goto exit_unlock; } - /* get latest topology info for each device from psp */ - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, - &tmp_adev->psp.xgmi_context.top_info, false); + if (amdgpu_sriov_vf(adev) && + adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) { + /* only get topology for VF being init if it can support full duplex */ + ret = psp_xgmi_get_topology_info(&adev->psp, count, + &adev->psp.xgmi_context.top_info, false); if (ret) { - dev_err(tmp_adev->dev, + dev_err(adev->dev, "XGMI: Get topology failure on device %llx, hive %llx, ret %d", - tmp_adev->gmc.xgmi.node_id, - tmp_adev->gmc.xgmi.hive_id, ret); - /* To do : continue with some node failed or disable the whole hive */ + adev->gmc.xgmi.node_id, + adev->gmc.xgmi.hive_id, ret); + /* To do: continue with some node failed or disable the whole hive */ goto exit_unlock; } + + /* fill the topology info for peers instead of getting from PSP */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + amdgpu_xgmi_fill_topology_info(adev, tmp_adev); + } + } else { + /* get latest topology info for each device from psp */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, + &tmp_adev->psp.xgmi_context.top_info, false); + if (ret) { + dev_err(tmp_adev->dev, + "XGMI: Get topology failure on device %llx, hive %llx, ret %d", + tmp_adev->gmc.xgmi.node_id, + tmp_adev->gmc.xgmi.hive_id, ret); + /* To do: continue with some node failed or disable the whole hive */ + goto exit_unlock; + } + } } /* get topology again for hives that support extended data */ @@ -1227,10 +1317,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) { case AMDGPU_MCA_ERROR_TYPE_UE: - amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL); + amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL); break; case AMDGPU_MCA_ERROR_TYPE_CE: - amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL); + amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 3f715e7fe1..d6f808acfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -24,6 +24,7 @@ #include "soc15.h" #include "soc15_common.h" +#include
"amdgpu_reg_state.h" #include "amdgpu_xcp.h" #include "gfx_v9_4_3.h" #include "gfxhub_v1_2.h" @@ -656,3 +657,416 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) return 0; } + +static void aqua_read_smn(struct amdgpu_device *adev, + struct amdgpu_smn_reg_data *regdata, + uint64_t smn_addr) +{ + regdata->addr = smn_addr; + regdata->value = RREG32_PCIE(smn_addr); +} + +struct aqua_reg_list { + uint64_t start_addr; + uint32_t num_regs; + uint32_t incrx; +}; + +#define DW_ADDR_INCR 4 + +static void aqua_read_smn_ext(struct amdgpu_device *adev, + struct amdgpu_smn_reg_data *regdata, + uint64_t smn_addr, int i) +{ + regdata->addr = + smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i); + regdata->value = RREG32_PCIE_EXT(regdata->addr); +} + +#define smnreg_0x1A340218 0x1A340218 +#define smnreg_0x1A3402E4 0x1A3402E4 +#define smnreg_0x1A340294 0x1A340294 +#define smreg_0x1A380088 0x1A380088 + +#define NUM_PCIE_SMN_REGS 14 + +static struct aqua_reg_list pcie_reg_addrs[] = { + { smnreg_0x1A340218, 1, 0 }, + { smnreg_0x1A3402E4, 1, 0 }, + { smnreg_0x1A340294, 6, DW_ADDR_INCR }, + { smreg_0x1A380088, 6, DW_ADDR_INCR }, +}; + +static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev, + void *buf, size_t max_size) +{ + struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state; + uint32_t start_addr, incrx, num_regs, szbuf; + struct amdgpu_regs_pcie_v1_0 *pcie_regs; + struct amdgpu_smn_reg_data *reg_data; + struct pci_dev *us_pdev, *ds_pdev; + int aer_cap, r, n; + + if (!buf || !max_size) + return -EINVAL; + + pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf; + + szbuf = sizeof(*pcie_reg_state) + + amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS); + /* Only one instance of pcie regs */ + if (max_size < szbuf) + return -EOVERFLOW; + + pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf + + sizeof(*pcie_reg_state)); + pcie_regs->inst_header.instance = 0; + pcie_regs->inst_header.state = AMDGPU_INST_S_OK; + pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS; + + reg_data = pcie_regs->smn_reg_values; + + for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) { + start_addr = pcie_reg_addrs[r].start_addr; + incrx = pcie_reg_addrs[r].incrx; + num_regs = pcie_reg_addrs[r].num_regs; + for (n = 0; n < num_regs; n++) { + aqua_read_smn(adev, reg_data, start_addr + n * incrx); + ++reg_data; + } + } + + ds_pdev = pci_upstream_bridge(adev->pdev); + us_pdev = pci_upstream_bridge(ds_pdev); + + pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA, + &pcie_regs->device_status); + pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA, + &pcie_regs->link_status); + + aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR); + if (aer_cap) { + pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS, + &pcie_regs->pcie_corr_err_status); + pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS, + &pcie_regs->pcie_uncorr_err_status); + } + + pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS, + &pcie_regs->sub_bus_number_latency); + + pcie_reg_state->common_header.structure_size = szbuf; + pcie_reg_state->common_header.format_revision = 1; + pcie_reg_state->common_header.content_revision = 0; + pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE; + pcie_reg_state->common_header.num_instances = 1; + + return pcie_reg_state->common_header.structure_size; +} + +#define smnreg_0x11A00050 0x11A00050 +#define smnreg_0x11A00180 0x11A00180 +#define smnreg_0x11A00070 0x11A00070 +#define smnreg_0x11A00200 0x11A00200 +#define smnreg_0x11A0020C 0x11A0020C 
+#define smnreg_0x11A00210 0x11A00210 +#define smnreg_0x11A00108 0x11A00108 + +#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20)) + +#define NUM_XGMI_SMN_REGS 25 + +static struct aqua_reg_list xgmi_reg_addrs[] = { + { smnreg_0x11A00050, 1, 0 }, + { smnreg_0x11A00180, 16, DW_ADDR_INCR }, + { smnreg_0x11A00070, 4, DW_ADDR_INCR }, + { smnreg_0x11A00200, 1, 0 }, + { smnreg_0x11A0020C, 1, 0 }, + { smnreg_0x11A00210, 1, 0 }, + { smnreg_0x11A00108, 1, 0 }, +}; + +static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev, + void *buf, size_t max_size) +{ + struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state; + uint32_t start_addr, incrx, num_regs, szbuf; + struct amdgpu_regs_xgmi_v1_0 *xgmi_regs; + struct amdgpu_smn_reg_data *reg_data; + const int max_xgmi_instances = 8; + int inst = 0, i, j, r, n; + const int xgmi_inst = 2; + void *p; + + if (!buf || !max_size) + return -EINVAL; + + xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf; + + szbuf = sizeof(*xgmi_reg_state) + + amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs), + NUM_XGMI_SMN_REGS); + /* Only one instance of pcie regs */ + if (max_size < szbuf) + return -EOVERFLOW; + + p = &xgmi_reg_state->xgmi_state_regs[0]; + for_each_inst(i, adev->aid_mask) { + for (j = 0; j < xgmi_inst; ++j) { + xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p; + xgmi_regs->inst_header.instance = inst++; + + xgmi_regs->inst_header.state = AMDGPU_INST_S_OK; + xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS; + + reg_data = xgmi_regs->smn_reg_values; + + for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) { + start_addr = xgmi_reg_addrs[r].start_addr; + incrx = xgmi_reg_addrs[r].incrx; + num_regs = xgmi_reg_addrs[r].num_regs; + + for (n = 0; n < num_regs; n++) { + aqua_read_smn_ext( + adev, reg_data, + XGMI_LINK_REG(start_addr, j) + + n * incrx, + i); + ++reg_data; + } + } + p = reg_data; + } + } + + xgmi_reg_state->common_header.structure_size = szbuf; + xgmi_reg_state->common_header.format_revision = 1; + xgmi_reg_state->common_header.content_revision = 0; + xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI; + xgmi_reg_state->common_header.num_instances = max_xgmi_instances; + + return xgmi_reg_state->common_header.structure_size; +} + +#define smnreg_0x11C00070 0x11C00070 +#define smnreg_0x11C00210 0x11C00210 + +static struct aqua_reg_list wafl_reg_addrs[] = { + { smnreg_0x11C00070, 4, DW_ADDR_INCR }, + { smnreg_0x11C00210, 1, 0 }, +}; + +#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20)) + +#define NUM_WAFL_SMN_REGS 5 + +static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev, + void *buf, size_t max_size) +{ + struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state; + uint32_t start_addr, incrx, num_regs, szbuf; + struct amdgpu_regs_wafl_v1_0 *wafl_regs; + struct amdgpu_smn_reg_data *reg_data; + const int max_wafl_instances = 8; + int inst = 0, i, j, r, n; + const int wafl_inst = 2; + void *p; + + if (!buf || !max_size) + return -EINVAL; + + wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf; + + szbuf = sizeof(*wafl_reg_state) + + amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs), + NUM_WAFL_SMN_REGS); + + if (max_size < szbuf) + return -EOVERFLOW; + + p = &wafl_reg_state->wafl_state_regs[0]; + for_each_inst(i, adev->aid_mask) { + for (j = 0; j < wafl_inst; ++j) { + wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p; + wafl_regs->inst_header.instance = inst++; + + wafl_regs->inst_header.state = AMDGPU_INST_S_OK; + wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS; + + 
reg_data = wafl_regs->smn_reg_values; + + for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) { + start_addr = wafl_reg_addrs[r].start_addr; + incrx = wafl_reg_addrs[r].incrx; + num_regs = wafl_reg_addrs[r].num_regs; + for (n = 0; n < num_regs; n++) { + aqua_read_smn_ext( + adev, reg_data, + WAFL_LINK_REG(start_addr, j) + + n * incrx, + i); + ++reg_data; + } + } + p = reg_data; + } + } + + wafl_reg_state->common_header.structure_size = szbuf; + wafl_reg_state->common_header.format_revision = 1; + wafl_reg_state->common_header.content_revision = 0; + wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL; + wafl_reg_state->common_header.num_instances = max_wafl_instances; + + return wafl_reg_state->common_header.structure_size; +} + +#define smnreg_0x1B311060 0x1B311060 +#define smnreg_0x1B411060 0x1B411060 +#define smnreg_0x1B511060 0x1B511060 +#define smnreg_0x1B611060 0x1B611060 + +#define smnreg_0x1C307120 0x1C307120 +#define smnreg_0x1C317120 0x1C317120 + +#define smnreg_0x1C320830 0x1C320830 +#define smnreg_0x1C380830 0x1C380830 +#define smnreg_0x1C3D0830 0x1C3D0830 +#define smnreg_0x1C420830 0x1C420830 + +#define smnreg_0x1C320100 0x1C320100 +#define smnreg_0x1C380100 0x1C380100 +#define smnreg_0x1C3D0100 0x1C3D0100 +#define smnreg_0x1C420100 0x1C420100 + +#define smnreg_0x1B310500 0x1B310500 +#define smnreg_0x1C300400 0x1C300400 + +#define USR_CAKE_INCR 0x11000 +#define USR_LINK_INCR 0x100000 +#define USR_CP_INCR 0x10000 + +#define NUM_USR_SMN_REGS 20 + +struct aqua_reg_list usr_reg_addrs[] = { + { smnreg_0x1B311060, 4, DW_ADDR_INCR }, + { smnreg_0x1B411060, 4, DW_ADDR_INCR }, + { smnreg_0x1B511060, 4, DW_ADDR_INCR }, + { smnreg_0x1B611060, 4, DW_ADDR_INCR }, + { smnreg_0x1C307120, 2, DW_ADDR_INCR }, + { smnreg_0x1C317120, 2, DW_ADDR_INCR }, +}; + +#define NUM_USR1_SMN_REGS 46 +struct aqua_reg_list usr1_reg_addrs[] = { + { smnreg_0x1C320830, 6, USR_CAKE_INCR }, + { smnreg_0x1C380830, 5, USR_CAKE_INCR }, + { smnreg_0x1C3D0830, 5, USR_CAKE_INCR }, + { smnreg_0x1C420830, 4, USR_CAKE_INCR }, + { smnreg_0x1C320100, 6, USR_CAKE_INCR }, + { smnreg_0x1C380100, 5, USR_CAKE_INCR }, + { smnreg_0x1C3D0100, 5, USR_CAKE_INCR }, + { smnreg_0x1C420100, 4, USR_CAKE_INCR }, + { smnreg_0x1B310500, 4, USR_LINK_INCR }, + { smnreg_0x1C300400, 2, USR_CP_INCR }, +}; + +static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev, + void *buf, size_t max_size, + int reg_state) +{ + uint32_t start_addr, incrx, num_regs, szbuf, num_smn; + struct amdgpu_reg_state_usr_v1_0 *usr_reg_state; + struct amdgpu_regs_usr_v1_0 *usr_regs; + struct amdgpu_smn_reg_data *reg_data; + const int max_usr_instances = 4; + struct aqua_reg_list *reg_addrs; + int inst = 0, i, n, r, arr_size; + void *p; + + if (!buf || !max_size) + return -EINVAL; + + switch (reg_state) { + case AMDGPU_REG_STATE_TYPE_USR: + arr_size = ARRAY_SIZE(usr_reg_addrs); + reg_addrs = usr_reg_addrs; + num_smn = NUM_USR_SMN_REGS; + break; + case AMDGPU_REG_STATE_TYPE_USR_1: + arr_size = ARRAY_SIZE(usr1_reg_addrs); + reg_addrs = usr1_reg_addrs; + num_smn = NUM_USR1_SMN_REGS; + break; + default: + return -EINVAL; + } + + usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf; + + szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances, + sizeof(*usr_regs), + num_smn); + if (max_size < szbuf) + return -EOVERFLOW; + + p = &usr_reg_state->usr_state_regs[0]; + for_each_inst(i, adev->aid_mask) { + usr_regs = (struct amdgpu_regs_usr_v1_0 *)p; + usr_regs->inst_header.instance = inst++; + usr_regs->inst_header.state = 
AMDGPU_INST_S_OK; + usr_regs->inst_header.num_smn_regs = num_smn; + reg_data = usr_regs->smn_reg_values; + + for (r = 0; r < arr_size; r++) { + start_addr = reg_addrs[r].start_addr; + incrx = reg_addrs[r].incrx; + num_regs = reg_addrs[r].num_regs; + for (n = 0; n < num_regs; n++) { + aqua_read_smn_ext(adev, reg_data, + start_addr + n * incrx, i); + reg_data++; + } + } + p = reg_data; + } + + usr_reg_state->common_header.structure_size = szbuf; + usr_reg_state->common_header.format_revision = 1; + usr_reg_state->common_header.content_revision = 0; + usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR; + usr_reg_state->common_header.num_instances = max_usr_instances; + + return usr_reg_state->common_header.structure_size; +} + +ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev, + enum amdgpu_reg_state reg_state, void *buf, + size_t max_size) +{ + ssize_t size; + + switch (reg_state) { + case AMDGPU_REG_STATE_TYPE_PCIE: + size = aqua_vanjaram_read_pcie_state(adev, buf, max_size); + break; + case AMDGPU_REG_STATE_TYPE_XGMI: + size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size); + break; + case AMDGPU_REG_STATE_TYPE_WAFL: + size = aqua_vanjaram_read_wafl_state(adev, buf, max_size); + break; + case AMDGPU_REG_STATE_TYPE_USR: + size = aqua_vanjaram_read_usr_state(adev, buf, max_size, + AMDGPU_REG_STATE_TYPE_USR); + break; + case AMDGPU_REG_STATE_TYPE_USR_1: + size = aqua_vanjaram_read_usr_state( + adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1); + break; + default: + return -EINVAL; + } + + return size; +} diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c index f0737fb3a9..d1bba9c64e 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c @@ -30,6 +30,8 @@ #define regATHUB_MISC_CNTL_V3_0_1 0x00d7 #define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0 +#define regATHUB_MISC_CNTL_V3_3_0 0x00d8 +#define regATHUB_MISC_CNTL_V3_3_0_BASE_IDX 0 static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev) @@ -40,6 +42,9 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev) case IP_VERSION(3, 0, 1): data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1); break; + case IP_VERSION(3, 3, 0): + data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0); + break; default: data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL); break; @@ -53,6 +58,9 @@ static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data) case IP_VERSION(3, 0, 1): WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data); break; + case IP_VERSION(3, 3, 0): + WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0, data); + break; default: WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data); break; diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 1b51be9b5f..ac51d19b51 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -395,7 +395,6 @@ static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) (*ptr)++; return; } - return; } } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 3ee219aa28..7672abe6c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -28,6 +28,7 @@ #include <acpi/video.h> +#include <drm/drm_edid.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" #include "amdgpu_connectors.h" diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c 
index bb666cb752..587ee632a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -21,6 +21,7 @@ * */ +#include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_modeset_helper_vtables.h> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7af277f61c..f22ec27365 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -21,6 +21,7 @@ * */ +#include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_modeset_helper_vtables.h> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 143efc37a1..4dbe9b3259 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -23,6 +23,7 @@ #include <linux/pci.h> +#include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_modeset_helper_vtables.h> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index adeddfb7ff..05bcce2338 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -21,6 +21,7 @@ * */ +#include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_modeset_helper_vtables.h> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 806a8cf904..0afe86bcc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -67,6 +67,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin"); @@ -106,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a) }; -static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = { - SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007), - SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007), - SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a), - SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f) -}; - #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -293,6 +277,9 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct 
amdgpu_device *adev) static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) + return; + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): @@ -300,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_11_0_1, (const u32)ARRAY_SIZE(golden_settings_gc_11_0_1)); break; - case IP_VERSION(11, 5, 0): - soc15_program_register_sequence(adev, - golden_settings_gc_11_5_0, - (const u32)ARRAY_SIZE(golden_settings_gc_11_5_0)); - break; default: break; } @@ -564,7 +546,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) } if (!amdgpu_sriov_vf(adev)) { - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) && + adev->pdev->revision == 0xCE) + snprintf(fw_name, sizeof(fw_name), "amdgpu/gc_11_0_0_rlc_1.bin"); + else + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); if (err) goto out; @@ -1644,7 +1630,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa)); } - active_rb_bitmap |= global_active_rb_bitmap; + active_rb_bitmap &= global_active_rb_bitmap; adev->gfx.config.backend_enable_mask = active_rb_bitmap; adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); } @@ -4466,11 +4452,43 @@ static int gfx_v11_0_wait_for_idle(void *handle) return -ETIMEDOUT; } +static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, + int req) +{ + u32 i, tmp, val; + + for (i = 0; i < adev->usec_timeout; i++) { + /* Request with MeId=2, PipeId=0 */ + tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req); + tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4); + WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp); + + val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX); + if (req) { + if (val == tmp) + break; + } else { + tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, + REQUEST, 1); + + /* unlocked or locked by firmware */ + if (val != tmp) + break; + } + udelay(1); + } + + if (i >= adev->usec_timeout) + return -EINVAL; + + return 0; +} + static int gfx_v11_0_soft_reset(void *handle) { u32 grbm_soft_reset = 0; u32 tmp; - int i, j, k; + int r, i, j, k; struct amdgpu_device *adev = (struct amdgpu_device *)handle; tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); @@ -4510,6 +4528,13 @@ static int gfx_v11_0_soft_reset(void *handle) } } + /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ + r = gfx_v11_0_request_gfx_index_mutex(adev, 1); + if (r) { + DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n"); + return r; + } + WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe); // Read CP_VMID_RESET register three times. 
@@ -4518,6 +4543,13 @@ static int gfx_v11_0_soft_reset(void *handle) RREG32_SOC15(GC, 0, regCP_VMID_RESET); RREG32_SOC15(GC, 0, regCP_VMID_RESET); + /* release the gfx mutex */ + r = gfx_v11_0_request_gfx_index_mutex(adev, 0); + if (r) { + DRM_ERROR("Failed to release the gfx mutex during soft reset\n"); + return r; + } + for (i = 0; i < adev->usec_timeout; i++) { if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) && !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE)) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 4a09cc0d8c..131cddbdda 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -3828,8 +3828,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, /* the caller should make sure initialize value of * err_data->ue_count and err_data->ce_count */ - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); } static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, @@ -3882,150 +3882,6 @@ static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, mutex_unlock(&adev->grbm_idx_mutex); } -static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t data; - - data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS); - if (data) { - dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3); - } - - data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS); - if (data) { - dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3); - } - - data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), - regVML2_WALKER_MEM_ECC_STATUS); - if (data) { - dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, - 0x3); - } -} - -static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev, - uint32_t status, int xcc_id) -{ - struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; - uint32_t i, simd, wave; - uint32_t wave_status; - uint32_t wave_pc_lo, wave_pc_hi; - uint32_t wave_exec_lo, wave_exec_hi; - uint32_t wave_inst_dw0, wave_inst_dw1; - uint32_t wave_ib_sts; - - for (i = 0; i < 32; i++) { - if (!((i << 1) & status)) - continue; - - simd = i / cu_info->max_waves_per_simd; - wave = i % cu_info->max_waves_per_simd; - - wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS); - wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO); - wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI); - wave_exec_lo = - wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO); - wave_exec_hi = - wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI); - wave_inst_dw0 = - wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0); - wave_inst_dw1 = - wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1); - wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS); - - dev_info( - adev->dev, - "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n", - simd, wave, wave_status, - ((uint64_t)wave_pc_hi << 32 | wave_pc_lo), - 
((uint64_t)wave_exec_hi << 32 | wave_exec_lo), - ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0), - wave_ib_sts); - } -} - -static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t se_idx, sh_idx, cu_idx; - uint32_t status; - - mutex_lock(&adev->grbm_idx_mutex); - for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) { - for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) { - for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) { - gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx, - cu_idx, xcc_id); - status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), - regSQ_TIMEOUT_STATUS); - if (status != 0) { - dev_info( - adev->dev, - "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n", - se_idx, sh_idx, cu_idx); - gfx_v9_4_3_log_cu_timeout_status( - adev, status, xcc_id); - } - /* clear old status */ - WREG32_SOC15(GC, GET_INST(GC, xcc_id), - regSQ_TIMEOUT_STATUS, 0); - } - } - } - gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, - xcc_id); - mutex_unlock(&adev->grbm_idx_mutex); -} - -static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev, - void *ras_error_status, int xcc_id) -{ - gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id); - gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id); -} - -static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev, - int xcc_id) -{ - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3); -} - -static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t se_idx, sh_idx, cu_idx; - - mutex_lock(&adev->grbm_idx_mutex); - for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) { - for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) { - for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) { - gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx, - cu_idx, xcc_id); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), - regSQ_TIMEOUT_STATUS, 0); - } - } - } - gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, - xcc_id); - mutex_unlock(&adev->grbm_idx_mutex); -} - -static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev, - void *ras_error_status, int xcc_id) -{ - gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id); - gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id); -} - static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev, void *ras_error_status, int xcc_id) { @@ -4067,16 +3923,6 @@ static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev) amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count); } -static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev) -{ - amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status); -} - -static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev) -{ - amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status); -} - static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) { amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer); @@ -4394,8 +4240,6 @@ struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = { struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = { .query_ras_error_count = &gfx_v9_4_3_query_ras_error_count, .reset_ras_error_count = 
&gfx_v9_4_3_reset_ras_error_count, - .query_ras_error_status = &gfx_v9_4_3_query_ras_error_status, - .reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status, }; struct amdgpu_gfx_ras gfx_v9_4_3_ras = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 95d06da544..49aecdcee0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -456,10 +456,12 @@ static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev, WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp); /* Setup L2 cache */ - tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); - WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp); - WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0); + if (!amdgpu_sriov_vf(adev)) { + tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index ced2e1bdcc..e67a62db9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -883,7 +883,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * GRBM interface. */ if ((vmhub == AMDGPU_GFXHUB(0)) && - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) RREG32_NO_KIQ(req); for (j = 0; j < adev->usec_timeout; j++) { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index bc38b90f8c..88ea58d5c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, @@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { - .set = jpeg_v4_0_set_interrupt_state, .process = jpeg_v4_0_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 6ede85b28c..78b74daf4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); return 0; } @@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = { - .set = 
jpeg_v4_0_5_set_interrupt_state, .process = jpeg_v4_0_5_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 4dfec56e1b..26d71a2239 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -408,6 +408,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes_set_hw_res_pkt.enable_reg_active_poll = 1; mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; mes_set_hw_res_pkt.oversubscription_timer = 50; + mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; + mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 9b0146732e..fb53aacdcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -652,8 +652,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, &ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); } static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 6d24c84924..19986ff6a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -401,8 +401,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device if (err_data.ce_count) dev_info(adev->dev, "%ld correctable hardware " - "errors detected in %s block, " - "no user action is needed.\n", + "errors detected in %s block\n", obj->err_data.ce_count, get_ras_block_str(adev->nbio.ras_if)); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index db013f8614..b4723d68ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -603,8 +603,7 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device if (err_data.ce_count) dev_info(adev->dev, "%ld correctable hardware " - "errors detected in %s block, " - "no user action is needed.\n", + "errors detected in %s block\n", obj->err_data.ce_count, get_ras_block_str(adev->nbio.ras_if)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 8eed9a73d0..1caed0163b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -366,7 +366,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) u32 ref_and_mask = 0; const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; - ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 + << (ring->me % adev->sdma.num_inst_per_aid); sdma_v4_4_2_wait_reg_mem(ring, 0, 1, adev->nbio.funcs->get_hdp_flush_done_offset(adev), @@ -2135,7 +2136,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, &ue_count); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); + amdgpu_ras_error_statistic_ue_count(err_data, 
&mcm_info, NULL, ue_count); } static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 0058f3f7cf..46f9f58d84 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -292,17 +292,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) u32 ref_and_mask = 0; const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; - ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | - SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | - SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ - amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); - amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); - amdgpu_ring_write(ring, ref_and_mask); /* reference */ - amdgpu_ring_write(ring, ref_and_mask); /* mask */ - amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | - SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ + if (ring->me > 1) { + amdgpu_asic_flush_hdp(adev, ring); + } else { + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); + amdgpu_ring_write(ring, ref_and_mask); /* reference */ + amdgpu_ring_write(ring, ref_and_mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ + } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f9ba180304..1c614451de 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -925,6 +925,7 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .pre_asic_init = &soc15_pre_asic_init, .query_video_codecs = &soc15_query_video_codecs, .encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing, + .get_reg_state = &aqua_vanjaram_get_reg_state, }; static int soc15_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index eac54042c6..1444b7765e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -27,6 +27,7 @@ #include "nbio_v6_1.h" #include "nbio_v7_0.h" #include "nbio_v7_4.h" +#include "amdgpu_reg_state.h" extern const struct amdgpu_ip_block_version vega10_common_ip_block; @@ -114,6 +115,9 @@ int aldebaran_reg_base_init(struct amdgpu_device *adev); void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev); +ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev, + enum amdgpu_reg_state reg_state, void *buf, + size_t max_size); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 4d7188912e..fd4505fa4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -450,10 
+450,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev) { switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): - return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): - return false; default: return true; } @@ -714,7 +712,10 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_GFX_PG; - adev->external_rev_id = adev->rev_id + 0x1; + if (adev->rev_id == 0) + adev->external_rev_id = 0x1; + else + adev->external_rev_id = adev->rev_id + 0x10; break; default: /* FIXME: not supported yet */ @@ -832,10 +833,35 @@ static int soc21_common_suspend(void *handle) return soc21_common_hw_fini(adev); } +static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg1, sol_reg2; + + /* Will reset for the following suspend abort cases. + * 1) Only reset dGPU side. + * 2) S3 suspend got aborted and TOS is active. + */ + if (!(adev->flags & AMD_IS_APU) && adev->in_s3 && + !adev->suspend_complete) { + sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + msleep(100); + sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + + return (sol_reg1 != sol_reg2); + } + + return false; +} + static int soc21_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc21_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend aborted, resetting..."); + soc21_asic_reset(adev); + } + return soc21_common_hw_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index e9c2ff74f0..7458a218e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "umc/umc_12_0_0_offset.h" #include "umc/umc_12_0_0_sh_mask.h" +#include "mp/mp_13_0_6_sh_mask.h" const uint32_t umc_v12_0_channel_idx_tbl[] @@ -88,16 +89,26 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev) umc_v12_0_reset_error_count_per_channel, NULL); } -bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) +bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status) { + if (amdgpu_ras_is_poison_mode_supported(adev) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)) + return true; + return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)); } -bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) +bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status) { + if (amdgpu_ras_is_poison_mode_supported(adev) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)) + return false; + return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 || (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 && @@ -105,7 +116,7 @@ bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) /* Identify data parity error in replay mode */ ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) 
== 0x5 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) && - !(umc_v12_0_is_uncorrectable_error(mc_umc_status))))); + !(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))))); } static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, @@ -124,7 +135,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if (umc_v12_0_is_correctable_error(mc_umc_status)) + if (umc_v12_0_is_correctable_error(adev, mc_umc_status)) *error_count += 1; } @@ -142,7 +153,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if (umc_v12_0_is_uncorrectable_error(mc_umc_status)) + if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) *error_count += 1; } @@ -166,8 +177,8 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev, umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count); umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); return 0; } @@ -360,6 +371,59 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev, return 0; } +static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + amdgpu_mca_smu_log_ras_error(adev, + AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status); + amdgpu_mca_smu_log_ras_error(adev, + AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status); +} + +static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_node *err_node; + uint64_t mc_umc_status; + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + for_each_ras_error(err_node, err_data) { + mc_umc_status = err_node->err_info.err_addr.err_status; + if (!mc_umc_status) + continue; + + if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) { + uint64_t mca_addr, err_addr, mca_ipid; + uint32_t InstanceIdLo; + struct amdgpu_smuio_mcm_config_info *mcm_info; + + mcm_info = &err_node->err_info.mcm_info; + mca_addr = err_node->err_info.err_addr.err_addr; + mca_ipid = err_node->err_info.err_addr.err_ipid; + + err_addr = REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo); + + dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n", + mca_ipid, + mcm_info->die_id, + MCA_IPID_LO_2_UMC_INST(InstanceIdLo), + MCA_IPID_LO_2_UMC_CH(InstanceIdLo), + err_addr); + + umc_v12_0_convert_error_address(adev, + err_data, err_addr, + MCA_IPID_LO_2_UMC_CH(InstanceIdLo), + MCA_IPID_LO_2_UMC_INST(InstanceIdLo), + mcm_info->die_id); + + /* Clear umc error address content */ + memset(&err_node->err_info.err_addr, + 0, sizeof(err_node->err_info.err_addr)); + } + } +} + static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev) { amdgpu_umc_loop_channels(adev, @@ -386,4 +450,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = { }, .err_cnt_init = umc_v12_0_err_cnt_init, .query_ras_poison_mode = umc_v12_0_query_ras_poison_mode, + .ecc_info_query_ras_error_count 
= umc_v12_0_ecc_info_query_ras_error_count, + .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address, }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h index b34b1e358f..e8de3a9225 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -117,8 +117,12 @@ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \ } while (0) -bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status); -bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status); +#define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \ + (((_ipid_lo) >> 12) & 0xF)) +#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7) + +bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); +bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); extern const uint32_t umc_v12_0_channel_idx_tbl[] diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 530549314c..a3ee3c4c65 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -64,7 +64,7 @@ static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev, uint64_t reg_value; if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1) - dev_info(adev->dev, "Deferred error, no user action is needed.\n"); + dev_info(adev->dev, "Deferred error\n"); if (mc_umc_status) dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c index 8e7b763cfd..fd23cd3485 100644 --- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c @@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch) WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size); + ring->wptr = 0; + data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK); WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 48bfcd0d55..8ab01ae919 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -100,6 +100,31 @@ static int vcn_v4_0_early_init(void *handle) return amdgpu_vcn_early_init(adev); } +static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx) +{ + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + + fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); + fw_shared->sq.is_enabled = 1; + + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG); + fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? 
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; + + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == + IP_VERSION(4, 0, 2)) { + fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; + fw_shared->drm_key_wa.method = + AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; + } + + if (amdgpu_vcnfw_log) + amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]); + + return 0; +} + /** * vcn_v4_0_sw_init - sw init for VCN block * @@ -124,8 +149,6 @@ static int vcn_v4_0_sw_init(void *handle) return r; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; - if (adev->vcn.harvest_config & (1 << i)) continue; @@ -161,23 +184,7 @@ static int vcn_v4_0_sw_init(void *handle) if (r) return r; - fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; - fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); - fw_shared->sq.is_enabled = 1; - - fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG); - fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? - AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; - - if (amdgpu_ip_version(adev, VCN_HWIP, 0) == - IP_VERSION(4, 0, 2)) { - fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; - fw_shared->drm_key_wa.method = - AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; - } - - if (amdgpu_vcnfw_log) - amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); + vcn_v4_0_fw_shared_init(adev, i); } if (amdgpu_sriov_vf(adev)) { @@ -1273,6 +1280,9 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; + // Must re/init fw_shared at beginning + vcn_v4_0_fw_shared_init(adev, i); + table_size = 0; MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i, @@ -2008,22 +2018,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta } /** - * vcn_v4_0_set_interrupt_state - set VCN block interrupt state - * - * @adev: amdgpu_device pointer - * @source: interrupt sources - * @type: interrupt types - * @state: interrupt states - * - * Set VCN block interrupt state - */ -static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, enum amdgpu_interrupt_state state) -{ - return 0; -} - -/** * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state * * @adev: amdgpu_device pointer @@ -2087,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_ } static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = { - .set = vcn_v4_0_set_interrupt_state, .process = vcn_v4_0_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 2eda30e78f..49e4c3c09a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle) vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); } } - - amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); } return 0; @@ -1669,22 +1667,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s } /** - * vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state - * - * @adev: amdgpu_device pointer - * @source: interrupt sources - * @type: interrupt types - * @state: interrupt states - * - * Set VCN block interrupt state - */ -static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src 
*source, - unsigned type, enum amdgpu_interrupt_state state) -{ - return 0; -} - -/** * vcn_v4_0_5_process_interrupt - process VCN block interrupt * * @adev: amdgpu_device pointer @@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp } static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = { - .set = vcn_v4_0_5_set_interrupt_state, .process = vcn_v4_0_5_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c index 174f13eff5..d20060a51e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c @@ -96,6 +96,10 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe) adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl; amdgpu_vpe_psp_update_sram(adev); + + /* Config DPM */ + amdgpu_vpe_configure_dpm(vpe); + return 0; } @@ -128,6 +132,8 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe) } vpe_v6_1_halt(vpe, false); + /* Config DPM */ + amdgpu_vpe_configure_dpm(vpe); return 0; } @@ -264,6 +270,15 @@ static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe) vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI; vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT; + vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2; + vpe->regs.dpm_pratio = regVPEC_QUEUE6_DUMMY4; + vpe->regs.dpm_request_interval = regVPEC_QUEUE5_DUMMY3; + vpe->regs.dpm_decision_threshold = regVPEC_QUEUE5_DUMMY4; + vpe->regs.dpm_busy_clamp_threshold = regVPEC_QUEUE7_DUMMY2; + vpe->regs.dpm_idle_clamp_threshold = regVPEC_QUEUE7_DUMMY3; + vpe->regs.dpm_request_lv = regVPEC_QUEUE7_DUMMY1; + vpe->regs.context_indicator = regVPEC_QUEUE6_DUMMY3; + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index d7cd5fa313..d1caaf0e6a 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_nv1x_hex[] = { @@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0xb9eef807, 0x876dff6d, 0x0000ffff, 0x87fe7e7e, 0x87ea6a6a, 0xb9faf802, - 0xbe80226c, 0xbf810000, + 0xbe80226c, 0xbf9b0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, @@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_aldebaran_hex[] = { @@ -2065,11 +2065,11 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_gfx10_hex[] = { - 0xbf820001, 0xbf820220, + 0xbf820001, 0xbf820221, 0xb0804004, 0xb978f802, 0x8a78ff78, 0x00020006, 0xb97bf803, 0x876eff78, @@ -2118,391 +2118,391 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf900004, 0xbf8cc07f, 0x877aff7f, 0x04000000, 0x8f7a857a, 0x886d7a6d, - 0xbefa037e, 0x877bff7f, - 0x0000ffff, 0xbefe03c1, - 0xbeff03c1, 0xdc5f8000, - 0x007a0000, 0x7e000280, - 0xbefe037a, 0xbeff037b, - 0xb97b02dc, 0x8f7b997b, - 0xb97a3a05, 0x807a817a, - 0xbf0d997b, 0xbf850002, - 0x8f7a897a, 0xbf820001, - 0x8f7a8a7a, 0xb97b1e06, - 0x8f7b8a7b, 0x807a7b7a, + 0x7e008200, 0xbefa037e, 0x877bff7f, 0x0000ffff, - 
0x807aff7a, 0x00000200, - 0x807a7e7a, 0x827b807b, - 0xd7610000, 0x00010870, - 0xd7610000, 0x00010a71, - 0xd7610000, 0x00010c72, - 0xd7610000, 0x00010e73, - 0xd7610000, 0x00011074, - 0xd7610000, 0x00011275, - 0xd7610000, 0x00011476, - 0xd7610000, 0x00011677, - 0xd7610000, 0x00011a79, - 0xd7610000, 0x00011c7e, - 0xd7610000, 0x00011e7f, - 0xbefe03ff, 0x00003fff, - 0xbeff0380, 0xdc5f8040, - 0x007a0000, 0xd760007a, - 0x00011d00, 0xd760007b, - 0x00011f00, 0xbefe037a, - 0xbeff037b, 0xbef4037e, - 0x8775ff7f, 0x0000ffff, - 0x8875ff75, 0x00040000, - 0xbef60380, 0xbef703ff, - 0x10807fac, 0xbef1037c, - 0xbef00380, 0xb97302dc, - 0x8f739973, 0xbefe03c1, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820002, - 0xbeff03c1, 0xbf820009, + 0xbefe03c1, 0xbeff03c1, + 0xdc5f8000, 0x007a0000, + 0x7e000280, 0xbefe037a, + 0xbeff037b, 0xb97b02dc, + 0x8f7b997b, 0xb97a3a05, + 0x807a817a, 0xbf0d997b, + 0xbf850002, 0x8f7a897a, + 0xbf820001, 0x8f7a8a7a, + 0xb97b1e06, 0x8f7b8a7b, + 0x807a7b7a, 0x877bff7f, + 0x0000ffff, 0x807aff7a, + 0x00000200, 0x807a7e7a, + 0x827b807b, 0xd7610000, + 0x00010870, 0xd7610000, + 0x00010a71, 0xd7610000, + 0x00010c72, 0xd7610000, + 0x00010e73, 0xd7610000, + 0x00011074, 0xd7610000, + 0x00011275, 0xd7610000, + 0x00011476, 0xd7610000, + 0x00011677, 0xd7610000, + 0x00011a79, 0xd7610000, + 0x00011c7e, 0xd7610000, + 0x00011e7f, 0xbefe03ff, + 0x00003fff, 0xbeff0380, + 0xdc5f8040, 0x007a0000, + 0xd760007a, 0x00011d00, + 0xd760007b, 0x00011f00, + 0xbefe037a, 0xbeff037b, + 0xbef4037e, 0x8775ff7f, + 0x0000ffff, 0x8875ff75, + 0x00040000, 0xbef60380, + 0xbef703ff, 0x10807fac, + 0xbef1037c, 0xbef00380, + 0xb97302dc, 0x8f739973, + 0xbefe03c1, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0xbeff0380, + 0xbf820002, 0xbeff03c1, + 0xbf820009, 0xbef603ff, + 0x01000000, 0xe0704080, + 0x705d0100, 0xe0704100, + 0x705d0200, 0xe0704180, + 0x705d0300, 0xbf820008, 0xbef603ff, 0x01000000, - 0xe0704080, 0x705d0100, - 0xe0704100, 0x705d0200, - 0xe0704180, 0x705d0300, - 0xbf820008, 0xbef603ff, - 0x01000000, 0xe0704100, - 0x705d0100, 0xe0704200, - 0x705d0200, 0xe0704300, - 0x705d0300, 0xb9703a05, - 0x80708170, 0xbf0d9973, - 0xbf850002, 0x8f708970, - 0xbf820001, 0x8f708a70, - 0xb97a1e06, 0x8f7a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0xbef603ff, - 0x01000000, 0x7e000280, - 0x7e020280, 0x7e040280, - 0xbefc0380, 0xd7610002, - 0x0000f871, 0x807c817c, - 0xd7610002, 0x0000f86c, - 0x807c817c, 0x8a7aff6d, - 0x80000000, 0xd7610002, - 0x0000f87a, 0x807c817c, - 0xd7610002, 0x0000f86e, - 0x807c817c, 0xd7610002, - 0x0000f86f, 0x807c817c, - 0xd7610002, 0x0000f878, - 0x807c817c, 0xb97af803, - 0xd7610002, 0x0000f87a, - 0x807c817c, 0xd7610002, - 0x0000f87b, 0x807c817c, - 0xb971f801, 0xd7610002, - 0x0000f871, 0x807c817c, - 0xb971f814, 0xd7610002, - 0x0000f871, 0x807c817c, - 0xb971f815, 0xd7610002, - 0x0000f871, 0x807c817c, - 0xbefe03ff, 0x0000ffff, - 0xbeff0380, 0xe0704000, - 0x705d0200, 0xbefe03c1, + 0xe0704100, 0x705d0100, + 0xe0704200, 0x705d0200, + 0xe0704300, 0x705d0300, 0xb9703a05, 0x80708170, 0xbf0d9973, 0xbf850002, 0x8f708970, 0xbf820001, 0x8f708a70, 0xb97a1e06, 0x8f7a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, 0xbef603ff, 0x01000000, - 0xbef90380, 0xbefc0380, - 0xbf800000, 0xbe802f00, - 0xbe822f02, 0xbe842f04, - 0xbe862f06, 0xbe882f08, - 0xbe8a2f0a, 0xbe8c2f0c, - 0xbe8e2f0e, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, + 0x7e000280, 0x7e020280, + 0x7e040280, 0xbefc0380, + 0xd7610002, 0x0000f871, + 0x807c817c, 0xd7610002, + 0x0000f86c, 0x807c817c, + 0x8a7aff6d, 0x80000000, + 
0xd7610002, 0x0000f87a, + 0x807c817c, 0xd7610002, + 0x0000f86e, 0x807c817c, + 0xd7610002, 0x0000f86f, + 0x807c817c, 0xd7610002, + 0x0000f878, 0x807c817c, + 0xb97af803, 0xd7610002, + 0x0000f87a, 0x807c817c, + 0xd7610002, 0x0000f87b, + 0x807c817c, 0xb971f801, + 0xd7610002, 0x0000f871, + 0x807c817c, 0xb971f814, + 0xd7610002, 0x0000f871, + 0x807c817c, 0xb971f815, + 0xd7610002, 0x0000f871, + 0x807c817c, 0xbefe03ff, + 0x0000ffff, 0xbeff0380, + 0xe0704000, 0x705d0200, + 0xbefe03c1, 0xb9703a05, + 0x80708170, 0xbf0d9973, + 0xbf850002, 0x8f708970, + 0xbf820001, 0x8f708a70, + 0xb97a1e06, 0x8f7a8a7a, + 0x80707a70, 0xbef603ff, + 0x01000000, 0xbef90380, + 0xbefc0380, 0xbf800000, + 0xbe802f00, 0xbe822f02, + 0xbe842f04, 0xbe862f06, + 0xbe882f08, 0xbe8a2f0a, + 0xbe8c2f0c, 0xbe8e2f0e, + 0xd7610002, 0x0000f200, 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, + 0x0000f201, 0x80798179, + 0xd7610002, 0x0000f202, 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, + 0x0000f203, 0x80798179, + 0xd7610002, 0x0000f204, 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, + 0x0000f205, 0x80798179, + 0xd7610002, 0x0000f206, 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, + 0x0000f207, 0x80798179, + 0xd7610002, 0x0000f208, 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, + 0x0000f209, 0x80798179, + 0xd7610002, 0x0000f20a, 0x80798179, 0xd7610002, - 0x0000f20c, 0x80798179, - 0xd7610002, 0x0000f20d, + 0x0000f20b, 0x80798179, + 0xd7610002, 0x0000f20c, 0x80798179, 0xd7610002, - 0x0000f20e, 0x80798179, - 0xd7610002, 0x0000f20f, - 0x80798179, 0xbf06a079, - 0xbf840006, 0xe0704000, - 0x705d0200, 0x8070ff70, - 0x00000080, 0xbef90380, - 0x7e040280, 0x807c907c, - 0xbf0aff7c, 0x00000060, - 0xbf85ffbc, 0xbe802f00, - 0xbe822f02, 0xbe842f04, - 0xbe862f06, 0xbe882f08, - 0xbe8a2f0a, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, + 0x0000f20d, 0x80798179, + 0xd7610002, 0x0000f20e, 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, + 0x0000f20f, 0x80798179, + 0xbf06a079, 0xbf840006, + 0xe0704000, 0x705d0200, + 0x8070ff70, 0x00000080, + 0xbef90380, 0x7e040280, + 0x807c907c, 0xbf0aff7c, + 0x00000060, 0xbf85ffbc, + 0xbe802f00, 0xbe822f02, + 0xbe842f04, 0xbe862f06, + 0xbe882f08, 0xbe8a2f0a, + 0xd7610002, 0x0000f200, 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, + 0x0000f201, 0x80798179, + 0xd7610002, 0x0000f202, 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, + 0x0000f203, 0x80798179, + 0xd7610002, 0x0000f204, 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, + 0x0000f205, 0x80798179, + 0xd7610002, 0x0000f206, 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, - 0x80798179, 0xe0704000, - 0x705d0200, 0xbefe03c1, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820001, - 0xbeff03c1, 0xb97b4306, - 0x877bc17b, 0xbf840044, - 0xbf8a0000, 0x877aff6d, - 0x80000000, 0xbf840040, - 0x8f7b867b, 0x8f7b827b, - 0xbef6037b, 0xb9703a05, - 0x80708170, 0xbf0d9973, - 0xbf850002, 0x8f708970, - 0xbf820001, 0x8f708a70, - 0xb97a1e06, 0x8f7a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0x8070ff70, - 0x00000080, 0xbef603ff, - 0x01000000, 0xd7650000, - 0x000100c1, 0xd7660000, - 0x000200c1, 0x16000084, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbefc0380, - 0xbf850012, 0xbe8303ff, - 0x00000080, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf8c0000, 
0xe0704000, - 0x705d0100, 0x807c037c, - 0x80700370, 0xd5250000, - 0x0001ff00, 0x00000080, - 0xbf0a7b7c, 0xbf85fff4, - 0xbf820011, 0xbe8303ff, - 0x00000100, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf8c0000, 0xe0704000, - 0x705d0100, 0x807c037c, - 0x80700370, 0xd5250000, - 0x0001ff00, 0x00000100, - 0xbf0a7b7c, 0xbf85fff4, + 0x0000f207, 0x80798179, + 0xd7610002, 0x0000f208, + 0x80798179, 0xd7610002, + 0x0000f209, 0x80798179, + 0xd7610002, 0x0000f20a, + 0x80798179, 0xd7610002, + 0x0000f20b, 0x80798179, + 0xe0704000, 0x705d0200, 0xbefe03c1, 0x907c9973, 0x877c817c, 0xbf06817c, - 0xbf850004, 0xbef003ff, - 0x00000200, 0xbeff0380, - 0xbf820003, 0xbef003ff, - 0x00000400, 0xbeff03c1, - 0xb97b3a05, 0x807b817b, - 0x8f7b827b, 0x907c9973, + 0xbf850002, 0xbeff0380, + 0xbf820001, 0xbeff03c1, + 0xb97b4306, 0x877bc17b, + 0xbf840044, 0xbf8a0000, + 0x877aff6d, 0x80000000, + 0xbf840040, 0x8f7b867b, + 0x8f7b827b, 0xbef6037b, + 0xb9703a05, 0x80708170, + 0xbf0d9973, 0xbf850002, + 0x8f708970, 0xbf820001, + 0x8f708a70, 0xb97a1e06, + 0x8f7a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, + 0x8070ff70, 0x00000080, + 0xbef603ff, 0x01000000, + 0xd7650000, 0x000100c1, + 0xd7660000, 0x000200c1, + 0x16000084, 0x907c9973, 0x877c817c, 0xbf06817c, - 0xbf850017, 0xbef603ff, - 0x01000000, 0xbefc0384, - 0xbf0a7b7c, 0xbf840037, - 0x7e008700, 0x7e028701, - 0x7e048702, 0x7e068703, - 0xe0704000, 0x705d0000, - 0xe0704080, 0x705d0100, - 0xe0704100, 0x705d0200, - 0xe0704180, 0x705d0300, - 0x807c847c, 0x8070ff70, - 0x00000200, 0xbf0a7b7c, - 0xbf85ffef, 0xbf820025, + 0xbefc0380, 0xbf850012, + 0xbe8303ff, 0x00000080, + 0xbf800000, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf8c0000, + 0xe0704000, 0x705d0100, + 0x807c037c, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000080, 0xbf0a7b7c, + 0xbf85fff4, 0xbf820011, + 0xbe8303ff, 0x00000100, + 0xbf800000, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf8c0000, + 0xe0704000, 0x705d0100, + 0x807c037c, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000100, 0xbf0a7b7c, + 0xbf85fff4, 0xbefe03c1, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850004, + 0xbef003ff, 0x00000200, + 0xbeff0380, 0xbf820003, + 0xbef003ff, 0x00000400, + 0xbeff03c1, 0xb97b3a05, + 0x807b817b, 0x8f7b827b, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850017, 0xbef603ff, 0x01000000, 0xbefc0384, 0xbf0a7b7c, - 0xbf840011, 0x7e008700, + 0xbf840037, 0x7e008700, 0x7e028701, 0x7e048702, 0x7e068703, 0xe0704000, - 0x705d0000, 0xe0704100, - 0x705d0100, 0xe0704200, - 0x705d0200, 0xe0704300, + 0x705d0000, 0xe0704080, + 0x705d0100, 0xe0704100, + 0x705d0200, 0xe0704180, 0x705d0300, 0x807c847c, - 0x8070ff70, 0x00000400, + 0x8070ff70, 0x00000200, 0xbf0a7b7c, 0xbf85ffef, - 0xb97b1e06, 0x877bc17b, - 0xbf84000c, 0x8f7b837b, - 0x807b7c7b, 0xbefe03c1, - 0xbeff0380, 0x7e008700, + 0xbf820025, 0xbef603ff, + 0x01000000, 0xbefc0384, + 0xbf0a7b7c, 0xbf840011, + 0x7e008700, 0x7e028701, + 0x7e048702, 0x7e068703, 0xe0704000, 0x705d0000, - 0x807c817c, 0x8070ff70, - 0x00000080, 0xbf0a7b7c, - 0xbf85fff8, 0xbf82013b, - 0xbef4037e, 0x8775ff7f, - 0x0000ffff, 0x8875ff75, - 0x00040000, 0xbef60380, - 0xbef703ff, 0x10807fac, - 0xb97202dc, 0x8f729972, - 0x876eff7f, 0x04000000, - 0xbf840034, 0xbefe03c1, - 0x907c9972, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820001, - 0xbeff03c1, 0xb96f4306, - 0x876fc16f, 0xbf840029, - 0x8f6f866f, 0x8f6f826f, - 0xbef6036f, 0xb9783a05, - 0x80788178, 0xbf0d9972, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb96e1e06, 0x8f6e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x8078ff78, 
- 0x00000080, 0xbef603ff, - 0x01000000, 0x907c9972, - 0x877c817c, 0xbf06817c, - 0xbefc0380, 0xbf850009, - 0xe0310000, 0x781d0000, - 0x807cff7c, 0x00000080, - 0x8078ff78, 0x00000080, - 0xbf0a6f7c, 0xbf85fff8, - 0xbf820008, 0xe0310000, - 0x781d0000, 0x807cff7c, - 0x00000100, 0x8078ff78, - 0x00000100, 0xbf0a6f7c, - 0xbf85fff8, 0xbef80380, + 0xe0704100, 0x705d0100, + 0xe0704200, 0x705d0200, + 0xe0704300, 0x705d0300, + 0x807c847c, 0x8070ff70, + 0x00000400, 0xbf0a7b7c, + 0xbf85ffef, 0xb97b1e06, + 0x877bc17b, 0xbf84000c, + 0x8f7b837b, 0x807b7c7b, + 0xbefe03c1, 0xbeff0380, + 0x7e008700, 0xe0704000, + 0x705d0000, 0x807c817c, + 0x8070ff70, 0x00000080, + 0xbf0a7b7c, 0xbf85fff8, + 0xbf82013b, 0xbef4037e, + 0x8775ff7f, 0x0000ffff, + 0x8875ff75, 0x00040000, + 0xbef60380, 0xbef703ff, + 0x10807fac, 0xb97202dc, + 0x8f729972, 0x876eff7f, + 0x04000000, 0xbf840034, 0xbefe03c1, 0x907c9972, 0x877c817c, 0xbf06817c, 0xbf850002, 0xbeff0380, 0xbf820001, 0xbeff03c1, - 0xb96f3a05, 0x806f816f, - 0x8f6f826f, 0x907c9972, - 0x877c817c, 0xbf06817c, - 0xbf850024, 0xbef603ff, - 0x01000000, 0xbeee0378, + 0xb96f4306, 0x876fc16f, + 0xbf840029, 0x8f6f866f, + 0x8f6f826f, 0xbef6036f, + 0xb9783a05, 0x80788178, + 0xbf0d9972, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb96e1e06, + 0x8f6e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, - 0xbefc0384, 0xbf0a6f7c, - 0xbf840050, 0xe0304000, - 0x785d0000, 0xe0304080, - 0x785d0100, 0xe0304100, - 0x785d0200, 0xe0304180, - 0x785d0300, 0xbf8c3f70, - 0x7e008500, 0x7e028501, - 0x7e048502, 0x7e068503, - 0x807c847c, 0x8078ff78, - 0x00000200, 0xbf0a6f7c, - 0xbf85ffee, 0xe0304000, - 0x6e5d0000, 0xe0304080, - 0x6e5d0100, 0xe0304100, - 0x6e5d0200, 0xe0304180, - 0x6e5d0300, 0xbf8c3f70, - 0xbf820034, 0xbef603ff, - 0x01000000, 0xbeee0378, - 0x8078ff78, 0x00000400, - 0xbefc0384, 0xbf0a6f7c, - 0xbf840012, 0xe0304000, - 0x785d0000, 0xe0304100, - 0x785d0100, 0xe0304200, - 0x785d0200, 0xe0304300, - 0x785d0300, 0xbf8c3f70, - 0x7e008500, 0x7e028501, - 0x7e048502, 0x7e068503, - 0x807c847c, 0x8078ff78, - 0x00000400, 0xbf0a6f7c, - 0xbf85ffee, 0xb96f1e06, - 0x876fc16f, 0xbf84000e, - 0x8f6f836f, 0x806f7c6f, - 0xbefe03c1, 0xbeff0380, + 0x8078ff78, 0x00000080, + 0xbef603ff, 0x01000000, + 0x907c9972, 0x877c817c, + 0xbf06817c, 0xbefc0380, + 0xbf850009, 0xe0310000, + 0x781d0000, 0x807cff7c, + 0x00000080, 0x8078ff78, + 0x00000080, 0xbf0a6f7c, + 0xbf85fff8, 0xbf820008, + 0xe0310000, 0x781d0000, + 0x807cff7c, 0x00000100, + 0x8078ff78, 0x00000100, + 0xbf0a6f7c, 0xbf85fff8, + 0xbef80380, 0xbefe03c1, + 0x907c9972, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0xbeff0380, 0xbf820001, + 0xbeff03c1, 0xb96f3a05, + 0x806f816f, 0x8f6f826f, + 0x907c9972, 0x877c817c, + 0xbf06817c, 0xbf850024, + 0xbef603ff, 0x01000000, + 0xbeee0378, 0x8078ff78, + 0x00000200, 0xbefc0384, + 0xbf0a6f7c, 0xbf840050, 0xe0304000, 0x785d0000, + 0xe0304080, 0x785d0100, + 0xe0304100, 0x785d0200, + 0xe0304180, 0x785d0300, 0xbf8c3f70, 0x7e008500, - 0x807c817c, 0x8078ff78, - 0x00000080, 0xbf0a6f7c, - 0xbf85fff7, 0xbeff03c1, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807c847c, + 0x8078ff78, 0x00000200, + 0xbf0a6f7c, 0xbf85ffee, 0xe0304000, 0x6e5d0000, - 0xe0304100, 0x6e5d0100, - 0xe0304200, 0x6e5d0200, - 0xe0304300, 0x6e5d0300, - 0xbf8c3f70, 0xb9783a05, - 0x80788178, 0xbf0d9972, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb96e1e06, 0x8f6e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef603ff, - 0x01000000, 0xbefc03ff, - 0x0000006c, 0x80f89078, - 0xf429003a, 0xf0000000, - 0xbf8cc07f, 0x80fc847c, - 0xbf800000, 0xbe803100, - 
0xbe823102, 0x80f8a078, - 0xf42d003a, 0xf0000000, - 0xbf8cc07f, 0x80fc887c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0x80f8c078, - 0xf431003a, 0xf0000000, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0xbe883108, - 0xbe8a310a, 0xbe8c310c, - 0xbe8e310e, 0xbf06807c, - 0xbf84fff0, 0xba80f801, - 0x00000000, 0xbf8a0000, + 0xe0304080, 0x6e5d0100, + 0xe0304100, 0x6e5d0200, + 0xe0304180, 0x6e5d0300, + 0xbf8c3f70, 0xbf820034, + 0xbef603ff, 0x01000000, + 0xbeee0378, 0x8078ff78, + 0x00000400, 0xbefc0384, + 0xbf0a6f7c, 0xbf840012, + 0xe0304000, 0x785d0000, + 0xe0304100, 0x785d0100, + 0xe0304200, 0x785d0200, + 0xe0304300, 0x785d0300, + 0xbf8c3f70, 0x7e008500, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807c847c, + 0x8078ff78, 0x00000400, + 0xbf0a6f7c, 0xbf85ffee, + 0xb96f1e06, 0x876fc16f, + 0xbf84000e, 0x8f6f836f, + 0x806f7c6f, 0xbefe03c1, + 0xbeff0380, 0xe0304000, + 0x785d0000, 0xbf8c3f70, + 0x7e008500, 0x807c817c, + 0x8078ff78, 0x00000080, + 0xbf0a6f7c, 0xbf85fff7, + 0xbeff03c1, 0xe0304000, + 0x6e5d0000, 0xe0304100, + 0x6e5d0100, 0xe0304200, + 0x6e5d0200, 0xe0304300, + 0x6e5d0300, 0xbf8c3f70, 0xb9783a05, 0x80788178, 0xbf0d9972, 0xbf850002, 0x8f788978, 0xbf820001, 0x8f788a78, 0xb96e1e06, 0x8f6e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, 0xbef603ff, 0x01000000, - 0xf4211bfa, 0xf0000000, - 0x80788478, 0xf4211b3a, + 0xbefc03ff, 0x0000006c, + 0x80f89078, 0xf429003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc847c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0x80f8a078, 0xf42d003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc887c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0x80f8c078, 0xf431003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xbe883108, 0xbe8a310a, + 0xbe8c310c, 0xbe8e310e, + 0xbf06807c, 0xbf84fff0, + 0xba80f801, 0x00000000, + 0xbf8a0000, 0xb9783a05, + 0x80788178, 0xbf0d9972, + 0xbf850002, 0x8f788978, + 0xbf820001, 0x8f788a78, + 0xb96e1e06, 0x8f6e8a6e, + 0x80786e78, 0x8078ff78, + 0x00000200, 0xbef603ff, + 0x01000000, 0xf4211bfa, 0xf0000000, 0x80788478, - 0xf4211b7a, 0xf0000000, - 0x80788478, 0xf4211c3a, + 0xf4211b3a, 0xf0000000, + 0x80788478, 0xf4211b7a, 0xf0000000, 0x80788478, - 0xf4211c7a, 0xf0000000, - 0x80788478, 0xf4211eba, + 0xf4211c3a, 0xf0000000, + 0x80788478, 0xf4211c7a, 0xf0000000, 0x80788478, - 0xf4211efa, 0xf0000000, - 0x80788478, 0xf4211e7a, + 0xf4211eba, 0xf0000000, + 0x80788478, 0xf4211efa, 0xf0000000, 0x80788478, - 0xf4211cfa, 0xf0000000, - 0x80788478, 0xf4211bba, + 0xf4211e7a, 0xf0000000, + 0x80788478, 0xf4211cfa, 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef815, 0xbefc036f, - 0xbefe0370, 0xbeff0371, - 0x876f7bff, 0x000003ff, - 0xb9ef4803, 0x876f7bff, - 0xfffff800, 0x906f8b6f, - 0xb9efa2c3, 0xb9f3f801, - 0xb96e3a05, 0x806e816e, - 0xbf0d9972, 0xbf850002, - 0x8f6e896e, 0xbf820001, - 0x8f6e8a6e, 0xb96f1e06, - 0x8f6f8a6f, 0x806e6f6e, - 0x806eff6e, 0x00000200, - 0x806e746e, 0x826f8075, - 0x876fff6f, 0x0000ffff, - 0xf4091c37, 0xfa000050, - 0xf4091d37, 0xfa000060, - 0xf4011e77, 0xfa000074, - 0xbf8cc07f, 0x876dff6d, - 0x0000ffff, 0x87fe7e7e, - 0x87ea6a6a, 0xb9faf802, - 0xbe80226c, 0xbf810000, + 0xb9eef814, 0xf4211bba, + 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef815, + 0xbefc036f, 0xbefe0370, + 0xbeff0371, 0x876f7bff, + 0x000003ff, 0xb9ef4803, + 0x876f7bff, 0xfffff800, + 0x906f8b6f, 0xb9efa2c3, + 0xb9f3f801, 0xb96e3a05, + 0x806e816e, 0xbf0d9972, + 0xbf850002, 
0x8f6e896e, + 0xbf820001, 0x8f6e8a6e, + 0xb96f1e06, 0x8f6f8a6f, + 0x806e6f6e, 0x806eff6e, + 0x00000200, 0x806e746e, + 0x826f8075, 0x876fff6f, + 0x0000ffff, 0xf4091c37, + 0xfa000050, 0xf4091d37, + 0xfa000060, 0xf4011e77, + 0xfa000074, 0xbf8cc07f, + 0x876dff6d, 0x0000ffff, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9faf802, 0xbe80226c, + 0xbf9b0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, - 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_gfx11_hex[] = { @@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { 0xb8eef802, 0xbf0d866e, 0xbfa20002, 0xb97af802, 0xbe80486c, 0xb97af802, - 0xbe804a6c, 0xbfb00000, + 0xbe804a6c, 0xbfb10000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, @@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index fdab646244..71b3dc0c73 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -369,6 +369,12 @@ L_SLEEP: s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp #if NO_SQC_STORE +#if ASIC_FAMILY <= CHIP_SIENNA_CICHLID + // gfx10: If there was a VALU exception, the exception state must be + // cleared before executing the VALU instructions below. + v_clrexcp +#endif + // Trap temporaries must be saved via VGPR but all VGPRs are in use. // There is no ttmp space to hold the resource constant for VGPR save. // Save v0 by itself since it requires only two SGPRs. @@ -1098,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV: s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution L_END_PGM: - s_endpgm + s_endpgm_saved end function write_hwreg_to_mem(s, s_rsrc, s_mem_offset) diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index e506411ad2..bb26338204 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -921,7 +921,7 @@ L_RESTORE: /* the END */ /**************************************************************************/ L_END_PGM: - s_endpgm + s_endpgm_saved end diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index f6d4748c19..08da993df1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -778,8 +778,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp, * nodes, but not more than args->num_of_nodes as that is * the amount of memory allocated by user */ - pa = kzalloc((sizeof(struct kfd_process_device_apertures) * - args->num_of_nodes), GFP_KERNEL); + pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures), + GFP_KERNEL); if (!pa) return -ENOMEM; @@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */ - amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv); + err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv); + if (err) + goto sync_memory_failed; } mutex_unlock(&p->mutex); @@ -1564,16 +1566,11 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, { struct kfd_ioctl_import_dmabuf_args *args = data; struct kfd_process_device 
*pdd; - struct dma_buf *dmabuf; int idr_handle; uint64_t size; void *mem; int r; - dmabuf = dma_buf_get(args->dmabuf_fd); - if (IS_ERR(dmabuf)) - return PTR_ERR(dmabuf); - mutex_lock(&p->mutex); pdd = kfd_process_device_data_by_id(p, args->gpu_id); if (!pdd) { @@ -1587,10 +1584,10 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, goto err_unlock; } - r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf, - args->va_addr, pdd->drm_priv, - (struct kgd_mem **)&mem, &size, - NULL); + r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd, + args->va_addr, pdd->drm_priv, + (struct kgd_mem **)&mem, &size, + NULL); if (r) goto err_unlock; @@ -1601,7 +1598,6 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, } mutex_unlock(&p->mutex); - dma_buf_put(dmabuf); args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); @@ -1612,7 +1608,6 @@ err_free: pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); - dma_buf_put(dmabuf); return r; } @@ -1855,8 +1850,8 @@ static uint32_t get_process_num_bos(struct kfd_process *p) return num_of_bos; } -static int criu_get_prime_handle(struct kgd_mem *mem, int flags, - u32 *shared_fd) +static int criu_get_prime_handle(struct kgd_mem *mem, + int flags, u32 *shared_fd) { struct dma_buf *dmabuf; int ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index c0e7154338..c0ae1a9749 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1997,6 +1997,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n"); while (halt_if_hws_hang) schedule(); + kfd_hws_hang(dqm); return -ETIME; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 0f58be6513..739721254a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -880,6 +880,10 @@ static int copy_signaled_event_data(uint32_t num_events, dst = &data[i].memory_exception_data; src = &event->memory_exception_data; size = sizeof(struct kfd_hsa_memory_exception_data); + } else if (event->type == KFD_EVENT_TYPE_HW_EXCEPTION) { + dst = &data[i].memory_exception_data; + src = &event->hw_exception_data; + size = sizeof(struct kfd_hsa_hw_exception_data); } else if (event->type == KFD_EVENT_TYPE_SIGNAL && waiter->event_age_enabled) { dst = &data[i].signal_event_data.last_event_age; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index b8680e0753..bdc01ca960 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -260,19 +260,6 @@ static void svm_migrate_put_sys_page(unsigned long addr) put_page(page); } -static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) -{ - unsigned long cpages = 0; - unsigned long i; - - for (i = 0; i < migrate->npages; i++) { - if (migrate->src[i] & MIGRATE_PFN_VALID && - migrate->src[i] & MIGRATE_PFN_MIGRATE) - cpages++; - } - return cpages; -} - static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) { unsigned long upages = 0; @@ -402,6 +389,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; unsigned long cpages = 0; + unsigned long mpages = 0; dma_addr_t *scratch; void *buf; int r = -ENOMEM; @@ 
-442,20 +430,21 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, goto out_free; } if (cpages != npages) - pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", + pr_debug("partial migration, 0x%lx/0x%llx pages collected\n", cpages, npages); else - pr_debug("0x%lx pages migrated\n", cpages); + pr_debug("0x%lx pages collected\n", cpages); r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); - pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", - svm_migrate_successful_pages(&migrate), cpages, migrate.npages); - svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); + mpages = cpages - svm_migrate_unsuccessful_pages(&migrate); + pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", + mpages, cpages, migrate.npages); + kfd_smi_event_migration_end(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, 0, node->id, trigger); @@ -465,12 +454,12 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, out_free: kvfree(buf); out: - if (!r && cpages) { + if (!r && mpages) { pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) - WRITE_ONCE(pdd->page_in, pdd->page_in + cpages); + WRITE_ONCE(pdd->page_in, pdd->page_in + mpages); - return cpages; + return mpages; } return r; } @@ -479,6 +468,8 @@ out: * svm_migrate_ram_to_vram - migrate svm range from system to device * @prange: range structure * @best_loc: the device to migrate to + * @start_mgr: start page to migrate + * @last_mgr: last page to migrate * @mm: the process mm structure * @trigger: reason of migration * @@ -489,19 +480,20 @@ out: */ static int svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, + unsigned long start_mgr, unsigned long last_mgr, struct mm_struct *mm, uint32_t trigger) { unsigned long addr, start, end; struct vm_area_struct *vma; uint64_t ttm_res_offset; struct kfd_node *node; - unsigned long cpages = 0; + unsigned long mpages = 0; long r = 0; - if (prange->actual_loc == best_loc) { - pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n", - prange->svms, prange->start, prange->last, best_loc); - return 0; + if (start_mgr < prange->start || last_mgr > prange->last) { + pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n", + start_mgr, last_mgr, prange->start, prange->last); + return -EFAULT; } node = svm_range_get_node_by_id(prange, best_loc); @@ -510,18 +502,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, return -ENODEV; } - pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms, - prange->start, prange->last, best_loc); + pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n", + prange->svms, start_mgr, last_mgr, prange->start, prange->last, + best_loc); - start = prange->start << PAGE_SHIFT; - end = (prange->last + 1) << PAGE_SHIFT; + start = start_mgr << PAGE_SHIFT; + end = (last_mgr + 1) << PAGE_SHIFT; r = svm_range_vram_node_new(node, prange, true); if (r) { dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r); return r; } - ttm_res_offset = prange->offset << PAGE_SHIFT; + ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT; for (addr = start; addr < end;) { unsigned long next; @@ -536,16 +529,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("failed %ld to migrate\n", r); break; } else { - cpages += r; + mpages += r; } ttm_res_offset += next - addr; addr = next; } - if (cpages) { + if (mpages) { prange->actual_loc = best_loc; - 
svm_range_dma_unmap(prange); - } else { + prange->vram_pages += mpages; + } else if (!prange->actual_loc) { + /* if no pages were migrated and all pages in prange are in + * system ram, drop the svm_bo obtained from svm_range_vram_node_new + */ svm_range_vram_node_free(prange); } @@ -578,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start, prange->last); - addr = prange->start << PAGE_SHIFT; + addr = migrate->start; src = (uint64_t *)(scratch + npages); dst = scratch; @@ -663,9 +659,8 @@ out_oom: * Context: Process context, caller hold mmap read lock, prange->migrate_mutex * * Return: - * 0 - success with all pages migrated * negative values - indicate error - * positive values - partial migration, number of pages not migrated + * positive values or zero - number of pages migrated */ static long svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, @@ -676,6 +671,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, uint64_t npages = (end - start) >> PAGE_SHIFT; unsigned long upages = npages; unsigned long cpages = 0; + unsigned long mpages = 0; struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; @@ -725,10 +721,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, goto out_free; } if (cpages != npages) - pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", + pr_debug("partial migration, 0x%lx/0x%llx pages collected\n", cpages, npages); else - pr_debug("0x%lx pages migrated\n", cpages); + pr_debug("0x%lx pages collected\n", cpages); r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, scratch, npages); @@ -751,17 +747,21 @@ out_free: kvfree(buf); out: if (!r && cpages) { + mpages = cpages - upages; pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) - WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); + WRITE_ONCE(pdd->page_out, pdd->page_out + mpages); } - return r ? r : upages; + + return r ? 
r : mpages; } /** * svm_migrate_vram_to_ram - migrate svm range from device to system * @prange: range structure * @mm: process mm, use current->mm if NULL + * @start_mgr: start page that needs to be migrated to sys ram + * @last_mgr: last page that needs to be migrated to sys ram * @trigger: reason of migration * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback * @@ -771,6 +771,7 @@ out: * 0 - OK, otherwise error code */ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, + unsigned long start_mgr, unsigned long last_mgr, uint32_t trigger, struct page *fault_page) { struct kfd_node *node; @@ -778,26 +779,33 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, unsigned long addr; unsigned long start; unsigned long end; - unsigned long upages = 0; + unsigned long mpages = 0; long r = 0; + /* this prange has no vram pages to migrate to sys ram */ if (!prange->actual_loc) { pr_debug("[0x%lx 0x%lx] already migrated to ram\n", prange->start, prange->last); return 0; } + if (start_mgr < prange->start || last_mgr > prange->last) { + pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n", + start_mgr, last_mgr, prange->start, prange->last); + return -EFAULT; + } + node = svm_range_get_node_by_id(prange, prange->actual_loc); if (!node) { pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc); return -ENODEV; } pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n", - prange->svms, prange, prange->start, prange->last, + prange->svms, prange, start_mgr, last_mgr, prange->actual_loc); - start = prange->start << PAGE_SHIFT; - end = (prange->last + 1) << PAGE_SHIFT; + start = start_mgr << PAGE_SHIFT; + end = (last_mgr + 1) << PAGE_SHIFT; for (addr = start; addr < end;) { unsigned long next; @@ -816,14 +824,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, pr_debug("failed %ld to migrate prange %p\n", r, prange); break; } else { - upages += r; + mpages += r; } addr = next; } - if (r >= 0 && !upages) { - svm_range_vram_node_free(prange); - prange->actual_loc = 0; + if (r >= 0) { + prange->vram_pages -= mpages; + + /* if prange no longer has any vram pages, set its actual_loc + * to system ram and drop its svm_bo ref + */ + if (prange->vram_pages == 0 && prange->ttm_res) { + prange->actual_loc = 0; + svm_range_vram_node_free(prange); + } } return r < 0 ? r : 0; @@ -833,17 +848,23 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, * svm_migrate_vram_to_vram - migrate svm range from device to device * @prange: range structure * @best_loc: the device to migrate to + * @start: start page that needs to be migrated to sys ram + * @last: last page that needs to be migrated to sys ram * @mm: process mm, use current->mm if NULL * @trigger: reason of migration * * Context: Process context, caller hold mmap read lock, svms lock, prange lock * + * migrate all vram pages in prange to sys ram, then migrate the + * [start, last] pages from sys ram to gpu node best_loc. 
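
The CPU and GPU fault handlers below both clamp the migration window to a single granularity-aligned block around the faulting page before calling these helpers. A minimal worked example of that clamping, assuming the kernel ALIGN macros and the default KFD granularity of 9 (one 512-page block); the function and the numbers are purely illustrative:

#include <linux/align.h>
#include <linux/minmax.h>

static void granularity_window_example(void)
{
	/* page numbers, not byte addresses; granularity 9 => 512-page block */
	unsigned long addr = 0x12345;		/* faulting page */
	unsigned long size = 1UL << 9;		/* 1UL << prange->granularity */
	unsigned long range_start = 0x12000;	/* prange->start */
	unsigned long range_last = 0x12fff;	/* prange->last */
	unsigned long start, last;

	start = max(ALIGN_DOWN(addr, size), range_start);	/* 0x12200 */
	last = min(ALIGN(addr + 1, size) - 1, range_last);	/* 0x123ff */

	/* [0x12200, 0x123ff]: one aligned 512-page block around the fault,
	 * clamped so it never leaves the prange bounds */
}
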
+ * * Return: * 0 - OK, otherwise error code */ static int svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, - struct mm_struct *mm, uint32_t trigger) + unsigned long start, unsigned long last, + struct mm_struct *mm, uint32_t trigger) { int r, retries = 3; @@ -855,7 +876,8 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); do { - r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL); + r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, + trigger, NULL); if (r) return r; } while (prange->actual_loc && --retries); @@ -863,17 +885,21 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, if (prange->actual_loc) return -EDEADLK; - return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger); + return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger); } int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, + unsigned long start, unsigned long last, struct mm_struct *mm, uint32_t trigger) { - if (!prange->actual_loc) - return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger); + if (!prange->actual_loc || prange->actual_loc == best_loc) + return svm_migrate_ram_to_vram(prange, best_loc, start, last, + mm, trigger); + else - return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger); + return svm_migrate_vram_to_vram(prange, best_loc, start, last, + mm, trigger); } @@ -889,10 +915,9 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, */ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) { + unsigned long start, last, size; unsigned long addr = vmf->address; struct svm_range_bo *svm_bo; - enum svm_work_list_ops op; - struct svm_range *parent; struct svm_range *prange; struct kfd_process *p; struct mm_struct *mm; @@ -929,51 +954,31 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) mutex_lock(&p->svms.lock); - prange = svm_range_from_addr(&p->svms, addr, &parent); + prange = svm_range_from_addr(&p->svms, addr, NULL); if (!prange) { pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr); r = -EFAULT; goto out_unlock_svms; } - mutex_lock(&parent->migrate_mutex); - if (prange != parent) - mutex_lock_nested(&prange->migrate_mutex, 1); + mutex_lock(&prange->migrate_mutex); if (!prange->actual_loc) goto out_unlock_prange; - svm_range_lock(parent); - if (prange != parent) - mutex_lock_nested(&prange->lock, 1); - r = svm_range_split_by_granularity(p, mm, addr, parent, prange); - if (prange != parent) - mutex_unlock(&prange->lock); - svm_range_unlock(parent); - if (r) { - pr_debug("failed %d to split range by granularity\n", r); - goto out_unlock_prange; - } + /* Align migration range start and size to granularity size */ + size = 1UL << prange->granularity; + start = max(ALIGN_DOWN(addr, size), prange->start); + last = min(ALIGN(addr + 1, size) - 1, prange->last); - r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, - vmf->page); + r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last, + KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page); if (r) pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n", - r, prange->svms, prange, prange->start, prange->last); - - /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ - if (p->xnack_enabled && parent == prange) - op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP; - else - op = SVM_OP_UPDATE_RANGE_NOTIFIER; - svm_range_add_list_work(&p->svms, parent, mm, op); - 
schedule_deferred_list_work(&p->svms); + r, prange->svms, prange, start, last); out_unlock_prange: - if (prange != parent) - mutex_unlock(&prange->migrate_mutex); - mutex_unlock(&parent->migrate_mutex); + mutex_unlock(&prange->migrate_mutex); out_unlock_svms: mutex_unlock(&p->svms.lock); out_unref_process: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 487f263681..2eebf67f9c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -41,9 +41,13 @@ enum MIGRATION_COPY_DIR { }; int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, + unsigned long start, unsigned long last, struct mm_struct *mm, uint32_t trigger); + int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, + unsigned long start, unsigned long last, uint32_t trigger, struct page *fault_page); + unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index d722cbd317..826bc4f6c8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); if (has_wa_flag) { - uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ? - 0xffff : 0xffffffff; + uint32_t wa_mask = + (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff; m->compute_static_thread_mgmt_se0 = wa_mask; m->compute_static_thread_mgmt_se1 = wa_mask; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 42d881809d..697b6d530d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, update_cu_mask(mm, mqd, minfo, 0); set_priority(m, q); + if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) { + if (minfo->update_flag & UPDATE_FLAG_IS_GWS) + m->compute_resource_limits |= + COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + else + m->compute_resource_limits &= + ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + } + q->is_active = QUEUE_IS_ACTIVE(*q); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 02d1d9afbf..fedeba3e12 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -532,6 +532,7 @@ struct queue_properties { enum mqd_update_flag { UPDATE_FLAG_DBG_WA_ENABLE = 1, UPDATE_FLAG_DBG_WA_DISABLE = 2, + UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */ }; struct mqd_update_info { @@ -748,7 +749,6 @@ struct kfd_process_device { /* VM context for GPUVM allocations */ struct file *drm_file; void *drm_priv; - atomic64_t tlb_seq; /* GPUVM allocations storage */ struct idr alloc_idr; @@ -918,7 +918,7 @@ struct kfd_process { * fence will be triggered during eviction and new one will be created * during restore */ - struct dma_fence *ef; + struct dma_fence __rcu *ef; /* Work items for evicting and restoring BOs */ struct delayed_work eviction_work; @@ -1462,7 +1462,14 @@ void kfd_signal_reset_event(struct kfd_node *dev); void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid); -void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type); +static inline void kfd_flush_tlb(struct kfd_process_device *pdd, + enum 
TLB_FLUSH_TYPE type) +{ + struct amdgpu_device *adev = pdd->dev->adev; + struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); + + amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask); +} static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 7a33e06f5c..58c1fe5421 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -664,7 +664,8 @@ int kfd_process_create_wq(void) if (!kfd_process_wq) kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0); if (!kfd_restore_wq) - kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0); + kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", + WQ_FREEZABLE); if (!kfd_process_wq || !kfd_restore_wq) { kfd_process_destroy_wq(); @@ -818,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) mutex_lock(&kfd_processes_mutex); if (kfd_is_locked()) { - mutex_unlock(&kfd_processes_mutex); pr_debug("KFD is locked! Cannot create process"); - return ERR_PTR(-EINVAL); + process = ERR_PTR(-EINVAL); + goto out; } /* A prior open of /dev/kfd could have already created the process. */ @@ -1109,6 +1110,7 @@ static void kfd_process_wq_release(struct work_struct *work) { struct kfd_process *p = container_of(work, struct kfd_process, release_work); + struct dma_fence *ef; kfd_process_dequeue_from_all_devices(p); pqm_uninit(&p->pqm); @@ -1117,7 +1119,9 @@ static void kfd_process_wq_release(struct work_struct *work) * destroyed. This allows any BOs to be freed without * triggering pointless evictions or waiting for fences. */ - dma_fence_signal(p->ef); + synchronize_rcu(); + ef = rcu_access_pointer(p->ef); + dma_fence_signal(ef); kfd_process_remove_sysfs(p); @@ -1126,7 +1130,7 @@ static void kfd_process_wq_release(struct work_struct *work) svm_range_list_fini(p); kfd_process_destroy_pdds(p); - dma_fence_put(p->ef); + dma_fence_put(ef); kfd_event_free_process(p); @@ -1642,6 +1646,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; struct kfd_process *p; + struct dma_fence *ef; struct kfd_node *dev; int ret; @@ -1661,13 +1666,13 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm, &p->kgd_process_info, - &p->ef); + &ef); if (ret) { pr_err("Failed to create process VM object\n"); return ret; } + RCU_INIT_POINTER(p->ef, ef); pdd->drm_priv = drm_file->private_data; - atomic64_set(&pdd->tlb_seq, 0); ret = kfd_process_device_reserve_ib_mem(pdd); if (ret) @@ -1909,6 +1914,23 @@ kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, return -EINVAL; } +static int signal_eviction_fence(struct kfd_process *p) +{ + struct dma_fence *ef; + int ret; + + rcu_read_lock(); + ef = dma_fence_get_rcu_safe(&p->ef); + rcu_read_unlock(); + if (!ef) + return -EINVAL; + + ret = dma_fence_signal(ef); + dma_fence_put(ef); + + return ret; +} + static void evict_process_worker(struct work_struct *work) { int ret; @@ -1921,31 +1943,45 @@ static void evict_process_worker(struct work_struct *work) * lifetime of this thread, kfd_process p will be valid */ p = container_of(dwork, struct kfd_process, eviction_work); - WARN_ONCE(p->last_eviction_seqno != p->ef->seqno, - "Eviction fence mismatch\n"); - - /* Narrow window of overlap between restore and evict work - * item is possible. 
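
signal_eviction_fence() above is the read side of the new __rcu eviction fence; the write side lives in amdgpu_amdkfd_gpuvm_restore_process_bos(), which hands back a fresh fence. A minimal sketch of that publish pattern, assuming the process mutex is held as the update-side lock; the helper name is illustrative, not the driver's:

static void publish_eviction_fence_sketch(struct kfd_process *p,
					  struct dma_fence *new_fence)
{
	struct dma_fence *old;

	/* swap in the new fence; concurrent readers see either the old
	 * (already signaled) fence or the new one, never a torn pointer */
	old = rcu_replace_pointer(p->ef, new_fence, true);

	/* wait until no reader can still pick up the old pointer via
	 * dma_fence_get_rcu_safe(), then drop the process's reference */
	synchronize_rcu();
	dma_fence_put(old);
}
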
Once amdgpu_amdkfd_gpuvm_restore_process_bos - unreserves KFD BOs, it is possible to evicted again. But - restore has few more steps of finish. So lets wait for any - previous restore work to complete - */ - flush_delayed_work(&p->restore_work); pr_debug("Started evicting pasid 0x%x\n", p->pasid); ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM); if (!ret) { - dma_fence_signal(p->ef); - dma_fence_put(p->ef); - p->ef = NULL; - queue_delayed_work(kfd_restore_wq, &p->restore_work, - msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); + /* If another thread already signaled the eviction fence, + * they are responsible for stopping the queues and scheduling + * the restore work. + */ + if (signal_eviction_fence(p) || + mod_delayed_work(kfd_restore_wq, &p->restore_work, + msecs_to_jiffies(PROCESS_RESTORE_TIME_MS))) + kfd_process_restore_queues(p); pr_debug("Finished evicting pasid 0x%x\n", p->pasid); } else pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid); } +static int restore_process_helper(struct kfd_process *p) +{ + int ret = 0; + + /* VMs may not have been acquired yet during debugging. */ + if (p->kgd_process_info) { + ret = amdgpu_amdkfd_gpuvm_restore_process_bos( + p->kgd_process_info, &p->ef); + if (ret) + return ret; + } + + ret = kfd_process_restore_queues(p); + if (!ret) + pr_debug("Finished restoring pasid 0x%x\n", p->pasid); + else + pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); + + return ret; +} + static void restore_process_worker(struct work_struct *work) { struct delayed_work *dwork; @@ -1971,24 +2007,15 @@ static void restore_process_worker(struct work_struct *work) */ p->last_restore_timestamp = get_jiffies_64(); - /* VMs may not have been acquired yet during debugging. */ - if (p->kgd_process_info) - ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, - &p->ef); + + ret = restore_process_helper(p); if (ret) { pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n", p->pasid, PROCESS_BACK_OFF_TIME_MS); - ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, - msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)); - WARN(!ret, "reschedule restore work failed\n"); - return; + if (mod_delayed_work(kfd_restore_wq, &p->restore_work, + msecs_to_jiffies(PROCESS_RESTORE_TIME_MS))) + kfd_process_restore_queues(p); } - - ret = kfd_process_restore_queues(p); - if (!ret) - pr_debug("Finished restoring pasid 0x%x\n", p->pasid); - else - pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); } void kfd_suspend_all_processes(void) @@ -1999,14 +2026,9 @@ void kfd_suspend_all_processes(void) WARN(debug_evictions, "Evicting all processes"); hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - cancel_delayed_work_sync(&p->eviction_work); - flush_delayed_work(&p->restore_work); - if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND)) pr_err("Failed to suspend process 0x%x\n", p->pasid); - dma_fence_signal(p->ef); - dma_fence_put(p->ef); - p->ef = NULL; + signal_eviction_fence(p); } srcu_read_unlock(&kfd_processes_srcu, idx); } @@ -2018,7 +2040,7 @@ int kfd_resume_all_processes(void) int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu); hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) { + if (restore_process_helper(p)) { pr_err("Restore process %d failed during resume\n", p->pasid); ret = -EFAULT; @@ -2059,36 +2081,6 @@ int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process, KFD_CWSR_TBA_TMA_SIZE, 
vma->vm_page_prot); } -void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) -{ - struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); - uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); - struct kfd_node *dev = pdd->dev; - uint32_t xcc_mask = dev->xcc_mask; - int xcc = 0; - - /* - * It can be that we race and lose here, but that is extremely unlikely - * and the worst thing which could happen is that we flush the changes - * into the TLB once more which is harmless. - */ - if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq) - return; - - if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { - /* Nothing to flush until a VMID is assigned, which - * only happens when the first queue is created. - */ - if (pdd->qpd.vmid) - amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, - pdd->qpd.vmid); - } else { - for_each_inst(xcc, xcc_mask) - amdgpu_amdkfd_flush_gpu_tlb_pasid( - dev->adev, pdd->process->pasid, type, xcc); - } -} - /* assumes caller holds process lock. */ int kfd_process_drain_interrupts(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 43eff221ea..4858112f9a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws) { + struct mqd_update_info minfo = {0}; struct kfd_node *dev = NULL; struct process_queue_node *pqn; struct kfd_process_device *pdd; @@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, } pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0; + minfo.update_flag = gws ? 
UPDATE_FLAG_IS_GWS : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q, NULL); + pqn->q, &minfo); } void kfd_process_dequeue_from_all_devices(struct kfd_process *p) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9af1d09438..c50a0dc9c9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -198,6 +198,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n", addr[i] >> PAGE_SHIFT, page_to_pfn(page)); } + return 0; } @@ -349,6 +350,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, INIT_LIST_HEAD(&prange->child_list); atomic_set(&prange->invalid, 0); prange->validate_timestamp = 0; + prange->vram_pages = 0; mutex_init(&prange->migrate_mutex); mutex_init(&prange->lock); @@ -395,6 +397,8 @@ static void svm_range_bo_release(struct kref *kref) prange->start, prange->last); mutex_lock(&prange->lock); prange->svm_bo = NULL; + /* prange should not hold vram page now */ + WARN_ONCE(prange->actual_loc, "prange should not hold vram page"); mutex_unlock(&prange->lock); spin_lock(&svm_bo->list_lock); @@ -873,14 +877,29 @@ static void svm_range_debug_dump(struct svm_range_list *svms) static void * svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements, - uint64_t offset) + uint64_t offset, uint64_t *vram_pages) { + unsigned char *src = (unsigned char *)psrc + offset; unsigned char *dst; + uint64_t i; dst = kvmalloc_array(num_elements, size, GFP_KERNEL); if (!dst) return NULL; - memcpy(dst, (unsigned char *)psrc + offset, num_elements * size); + + if (!vram_pages) { + memcpy(dst, src, num_elements * size); + return (void *)dst; + } + + *vram_pages = 0; + for (i = 0; i < num_elements; i++) { + dma_addr_t *temp; + temp = (dma_addr_t *)dst + i; + *temp = *((dma_addr_t *)src + i); + if (*temp & SVM_RANGE_VRAM_DOMAIN) + (*vram_pages)++; + } return (void *)dst; } @@ -894,7 +913,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src) if (!src->dma_addr[i]) continue; dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i], - sizeof(*src->dma_addr[i]), src->npages, 0); + sizeof(*src->dma_addr[i]), src->npages, 0, NULL); if (!dst->dma_addr[i]) return -ENOMEM; } @@ -905,7 +924,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src) static int svm_range_split_array(void *ppnew, void *ppold, size_t size, uint64_t old_start, uint64_t old_n, - uint64_t new_start, uint64_t new_n) + uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages) { unsigned char *new, *old, *pold; uint64_t d; @@ -917,11 +936,12 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size, return 0; d = (new_start - old_start) * size; - new = svm_range_copy_array(pold, size, new_n, d); + /* get dma addr array for new range and calculate its vram page number */ + new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages); if (!new) return -ENOMEM; d = (new_start == old_start) ? 
new_n * size : 0; - old = svm_range_copy_array(pold, size, old_n, d); + old = svm_range_copy_array(pold, size, old_n, d, NULL); if (!old) { kvfree(new); return -ENOMEM; @@ -943,10 +963,13 @@ svm_range_split_pages(struct svm_range *new, struct svm_range *old, for (i = 0; i < MAX_GPU_INSTANCE; i++) { r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i], sizeof(*old->dma_addr[i]), old->start, - npages, new->start, new->npages); + npages, new->start, new->npages, + old->actual_loc ? &new->vram_pages : NULL); if (r) return r; } + if (old->actual_loc) + old->vram_pages -= new->vram_pages; return 0; } @@ -1092,7 +1115,7 @@ static int svm_range_split_tail(struct svm_range *prange, uint64_t new_last, struct list_head *insert_list, struct list_head *remap_list) { - struct svm_range *tail; + struct svm_range *tail = NULL; int r = svm_range_split(prange, prange->start, new_last, &tail); if (!r) { @@ -1107,7 +1130,7 @@ static int svm_range_split_head(struct svm_range *prange, uint64_t new_start, struct list_head *insert_list, struct list_head *remap_list) { - struct svm_range *head; + struct svm_range *head = NULL; int r = svm_range_split(prange, new_start, prange->last, &head); if (!r) { @@ -1130,66 +1153,6 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, list_add_tail(&pchild->child_list, &prange->child_list); } -/** - * svm_range_split_by_granularity - collect ranges within granularity boundary - * - * @p: the process with svms list - * @mm: mm structure - * @addr: the vm fault address in pages, to split the prange - * @parent: parent range if prange is from child list - * @prange: prange to split - * - * Trims @prange to be a single aligned block of prange->granularity if - * possible. The head and tail are added to the child_list in @parent. - * - * Context: caller must hold mmap_read_lock and prange->lock - * - * Return: - * 0 - OK, otherwise error code - */ -int -svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, - unsigned long addr, struct svm_range *parent, - struct svm_range *prange) -{ - struct svm_range *head, *tail; - unsigned long start, last, size; - int r; - - /* Align splited range start and size to granularity size, then a single - * PTE will be used for whole range, this reduces the number of PTE - * updated and the L1 TLB space used for translation. - */ - size = 1UL << prange->granularity; - start = ALIGN_DOWN(addr, size); - last = ALIGN(addr + 1, size) - 1; - - pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n", - prange->svms, prange->start, prange->last, start, last, size); - - if (start > prange->start) { - r = svm_range_split(prange, start, prange->last, &head); - if (r) - return r; - svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE); - } - - if (last < prange->last) { - r = svm_range_split(prange, prange->start, last, &tail); - if (r) - return r; - svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); - } - - /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ - if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) { - prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP; - pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n", - prange, prange->start, prange->last, - SVM_OP_ADD_RANGE_AND_MAP); - } - return 0; -} static bool svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) { @@ -1524,7 +1487,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr) uint32_t gpuidx; int r; - drm_exec_init(&ctx->exec, intr ? 
DRM_EXEC_INTERRUPTIBLE_WAIT: 0); + drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0); drm_exec_until_all_locked(&ctx->exec) { for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); @@ -1609,6 +1572,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) * 5. Release page table (and SVM BO) reservation */ static int svm_range_validate_and_map(struct mm_struct *mm, + unsigned long map_start, unsigned long map_last, struct svm_range *prange, int32_t gpuidx, bool intr, bool wait, bool flush_tlb) { @@ -1689,10 +1653,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } } - start = prange->start << PAGE_SHIFT; - end = (prange->last + 1) << PAGE_SHIFT; + start = map_start << PAGE_SHIFT; + end = (map_last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { struct hmm_range *hmm_range; + unsigned long map_start_vma; + unsigned long map_last_vma; struct vm_area_struct *vma; unsigned long next = 0; unsigned long offset; @@ -1720,7 +1686,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } if (!r) { - offset = (addr - start) >> PAGE_SHIFT; + offset = (addr >> PAGE_SHIFT) - prange->start; r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, hmm_range->hmm_pfns); if (r) @@ -1738,9 +1704,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm, r = -EAGAIN; } - if (!r) - r = svm_range_map_to_gpus(prange, offset, npages, readonly, - ctx->bitmap, wait, flush_tlb); + if (!r) { + map_start_vma = max(map_start, prange->start + offset); + map_last_vma = min(map_last, prange->start + offset + npages - 1); + if (map_start_vma <= map_last_vma) { + offset = map_start_vma - prange->start; + npages = map_last_vma - map_start_vma + 1; + r = svm_range_map_to_gpus(prange, offset, npages, readonly, + ctx->bitmap, wait, flush_tlb); + } + } if (!r && next == end) prange->mapped_to_gpu = true; @@ -1833,8 +1806,8 @@ static void svm_range_restore_work(struct work_struct *work) */ mutex_lock(&prange->migrate_mutex); - r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, - false, true, false); + r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, + MAX_GPU_INSTANCE, false, true, false); if (r) pr_debug("failed %d to map 0x%lx to gpus\n", r, prange->start); @@ -1871,7 +1844,7 @@ out_reschedule: /* If validation failed, reschedule another attempt */ if (evicted_ranges) { pr_debug("reschedule to restore svm range\n"); - schedule_delayed_work(&svms->restore_work, + queue_delayed_work(system_freezable_wq, &svms->restore_work, msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); kfd_smi_event_queue_restore_rescheduled(mm); @@ -1947,7 +1920,7 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm, pr_debug("failed to quiesce KFD\n"); pr_debug("schedule to restore svm %p ranges\n", svms); - schedule_delayed_work(&svms->restore_work, + queue_delayed_work(system_freezable_wq, &svms->restore_work, msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); } else { unsigned long s, l; @@ -2002,6 +1975,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old) new->actual_loc = old->actual_loc; new->granularity = old->granularity; new->mapped_to_gpu = old->mapped_to_gpu; + new->vram_pages = old->vram_pages; bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); @@ -2911,6 +2885,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t 
vmid, uint32_t node_id, uint64_t addr, bool write_fault) { + unsigned long start, last, size; struct mm_struct *mm = NULL; struct svm_range_list *svms; struct svm_range *prange; @@ -3046,40 +3021,44 @@ retry_write_locked: kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr, write_fault, timestamp); - if (prange->actual_loc != best_loc) { + /* Align migration range start and size to granularity size */ + size = 1UL << prange->granularity; + start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start); + last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); + if (prange->actual_loc != 0 || best_loc != 0) { migration = true; + if (best_loc) { - r = svm_migrate_to_vram(prange, best_loc, mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); + r = svm_migrate_to_vram(prange, best_loc, start, last, + mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); if (r) { pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n", r, addr); /* Fallback to system memory if migration to * VRAM failed */ - if (prange->actual_loc) - r = svm_migrate_vram_to_ram(prange, mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, - NULL); + if (prange->actual_loc && prange->actual_loc != best_loc) + r = svm_migrate_vram_to_ram(prange, mm, start, last, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); else r = 0; } } else { - r = svm_migrate_vram_to_ram(prange, mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, - NULL); + r = svm_migrate_vram_to_ram(prange, mm, start, last, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); } if (r) { pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", - r, svms, prange->start, prange->last); + r, svms, start, last); goto out_unlock_range; } } - r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false); + r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false, + false, false); if (r) pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", - r, svms, prange->start, prange->last); + r, svms, start, last); kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, migration); @@ -3425,18 +3404,24 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, *migrated = false; best_loc = svm_range_best_prefetch_location(prange); - if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED || - best_loc == prange->actual_loc) + /* When best_loc is a GPU node and the same as prange->actual_loc, + * we still need to do migration, as prange->actual_loc != 0 does + * not mean all pages in prange are in vram. hmm migrate will pick + * up the right pages during migration. + */
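The fault handler above now migrates only a granularity-aligned window around the faulting page instead of the whole prange. A standalone sketch of that clamping arithmetic, outside the kernel (function and variable names here are illustrative, not the driver's):

#include <stdio.h>

/* Sketch of the granularity clamp in svm_range_restore_pages(): align the
 * faulting page down/up to a 2^granularity block, then clamp the block to
 * the prange bounds. All values are page numbers, as in the driver. */
static void migration_window(unsigned long fault_addr, unsigned int granularity,
			     unsigned long range_start, unsigned long range_last,
			     unsigned long *start, unsigned long *last)
{
	unsigned long size = 1UL << granularity;
	unsigned long aligned_start = fault_addr & ~(size - 1);	/* ALIGN_DOWN(addr, size) */
	unsigned long aligned_last = ((fault_addr + size) & ~(size - 1)) - 1; /* ALIGN(addr + 1, size) - 1 */

	*start = aligned_start > range_start ? aligned_start : range_start;
	*last = aligned_last < range_last ? aligned_last : range_last;
}

int main(void)
{
	unsigned long start, last;

	/* fault at page 0x1234 in prange [0x1000, 0x1fff], 512-page granularity */
	migration_window(0x1234, 9, 0x1000, 0x1fff, &start, &last);
	printf("migrate [0x%lx 0x%lx]\n", start, last);	/* prints [0x1200 0x13ff] */
	return 0;
}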
+ if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) || + (best_loc == 0 && prange->actual_loc == 0)) return 0; if (!best_loc) { - r = svm_migrate_vram_to_ram(prange, mm, + r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, KFD_MIGRATE_TRIGGER_PREFETCH, NULL); *migrated = !r; return r; } - r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH); + r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last, + mm, KFD_MIGRATE_TRIGGER_PREFETCH); *migrated = !r; return r; @@ -3490,7 +3475,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) mutex_lock(&prange->migrate_mutex); do { + /* Migrate all vram pages in this prange to sys ram; + * after that, prange->actual_loc should be zero. + */ r = svm_migrate_vram_to_ram(prange, mm, + prange->start, prange->last, KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL); } while (!r && prange->actual_loc && --retries); @@ -3614,8 +3603,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm, flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu; - r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, - true, true, flush_tlb); + r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, + MAX_GPU_INSTANCE, true, true, flush_tlb); if (r) pr_debug("failed %d to map svm range\n", r); @@ -3629,8 +3618,8 @@ out_unlock_range: pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n", prange, prange->start, prange->last); mutex_lock(&prange->migrate_mutex); - r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, - true, true, prange->mapped_to_gpu); + r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, + MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu); if (r) pr_debug("failed %d on remap svm range\n", r); mutex_unlock(&prange->migrate_mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index c528df1d0b..026863a0ab 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -78,6 +78,7 @@ struct svm_work_list_item { * @update_list:link list node used to add to update_list * @mapping: bo_va mapping structure to create and update GPU page table * @npages: number of pages + * @vram_pages: number of vram pages in this svm_range * @dma_addr: dma mapping address on each GPU for system memory physical page * @ttm_res: vram ttm resource map * @offset: range start offset within mm_nodes @@ -88,7 +89,9 @@ struct svm_work_list_item { * @flags: flags defined as KFD_IOCTL_SVM_FLAG_* * @perferred_loc: perferred location, 0 for CPU, or GPU id * @perfetch_loc: last prefetch location, 0 for CPU, or GPU id - * @actual_loc: the actual location, 0 for CPU, or GPU id + * @actual_loc: location of this svm_range. 0: all pages are in sys ram; + * GPU id: this svm_range may include vram pages from the GPU + * with id actual_loc.
* @granularity:migration granularity, log2 num pages * @invalid: not 0 means cpu page table is invalidated * @validate_timestamp: system timestamp when range is validated @@ -112,6 +115,7 @@ struct svm_range { struct list_head list; struct list_head update_list; uint64_t npages; + uint64_t vram_pages; dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; struct ttm_resource *ttm_res; uint64_t offset; @@ -168,9 +172,6 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear); void svm_range_vram_node_free(struct svm_range *prange); -int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, - unsigned long addr, struct svm_range *parent, - struct svm_range *prange); int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e5f7c92eeb..6ed2ec381a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, else mode = UNKNOWN_MEMORY_PARTITION_MODE; - if (pcache->cache_level == 2) - pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc; - else if (mode) - pcache->cache_size = pcache_info[cache_type].cache_size / mode; - else - pcache->cache_size = pcache_info[cache_type].cache_size; + pcache->cache_size = pcache_info[cache_type].cache_size; + /* Partition mode only affects L3 cache size */ + if (mode && pcache->cache_level == 3) + pcache->cache_size /= mode; if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) pcache->cache_type |= HSA_CACHE_TYPE_DATA; diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index af17ab8027..92a5c5efcf 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -30,6 +30,9 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/clk_mgr subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 8bf94920d2..ab2a97e354 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -25,22 +25,25 @@ +ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM = \ amdgpu_dm.o \ amdgpu_dm_plane.o \ amdgpu_dm_crtc.o \ amdgpu_dm_irq.o \ amdgpu_dm_mst_types.o \ - amdgpu_dm_color.o + amdgpu_dm_color.o \ + amdgpu_dm_services.o \ + amdgpu_dm_helpers.o \ + amdgpu_dm_pp_smu.o \ + amdgpu_dm_psr.o \ + amdgpu_dm_replay.o \ + amdgpu_dm_wb.o ifdef CONFIG_DRM_AMD_DC_FP AMDGPUDM += dc_fpu.o endif -ifneq ($(CONFIG_DRM_AMD_DC),) -AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o -endif - AMDGPUDM += amdgpu_dm_hdcp.o ifneq ($(CONFIG_DEBUG_FS),) @@ -52,3 +55,4 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc 
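Backing up to the kfd_topology change just above the display Makefile hunks: the memory partition mode now divides only the reported L3 size, and L2 is no longer scaled by the XCC count. A toy check of that sizing rule (table values below are made up, not from a real ASIC):

#include <stdio.h>

/* Sketch of the corrected sizing in fill_in_l2_l3_pcache(): start from the
 * table size and scale only L3 by the partition mode. */
static unsigned int reported_cache_size(unsigned int table_size_kb,
					unsigned int cache_level,
					unsigned int partition_mode)
{
	unsigned int size = table_size_kb;

	/* Partition mode only affects L3 cache size */
	if (partition_mode && cache_level == 3)
		size /= partition_mode;

	return size;
}

int main(void)
{
	printf("L2: %u KiB\n", reported_cache_size(4096, 2, 4));   /* 4096: unscaled */
	printf("L3: %u KiB\n", reported_cache_size(262144, 3, 4)); /* 65536: per partition */
	return 0;
}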
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM)) AMD_DISPLAY_FILES += $(AMDGPU_DM) +endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dafe9562a7..718e533ab4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -37,6 +37,7 @@ #include "dc/dc_dmub_srv.h" #include "dc/dc_edid_parser.h" #include "dc/dc_stat.h" +#include "dc/dc_state.h" #include "amdgpu_dm_trace.h" #include "dpcd_defs.h" #include "link/protocols/link_dpcd.h" @@ -54,6 +55,7 @@ #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_hdcp.h" #include <drm/display/drm_hdcp_helper.h> +#include "amdgpu_dm_wb.h" #include "amdgpu_pm.h" #include "amdgpu_atombios.h" @@ -84,12 +86,13 @@ #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> +#include <drm/drm_fixed.h> #include <drm/drm_fourcc.h> #include <drm/drm_edid.h> +#include <drm/drm_eld.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> #include <drm/drm_gem_atomic_helper.h> -#include <drm/drm_plane_helper.h> #include <acpi/video.h> @@ -269,6 +272,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, { u32 v_blank_start, v_blank_end, h_position, v_position; struct amdgpu_crtc *acrtc = NULL; + struct dc *dc = adev->dm.dc; if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL; @@ -281,6 +285,9 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, return 0; } + if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dc, false); + /* * TODO rework base driver to use values directly. * for now parse it back into reg-format @@ -574,6 +581,7 @@ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; + struct drm_writeback_job *job; struct amdgpu_crtc *acrtc; unsigned long flags; int vrr_active; @@ -582,6 +590,33 @@ static void dm_crtc_high_irq(void *interrupt_params) if (!acrtc) return; + if (acrtc->wb_pending) { + if (acrtc->wb_conn) { + spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags); + job = list_first_entry_or_null(&acrtc->wb_conn->job_queue, + struct drm_writeback_job, + list_entry); + spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags); + + if (job) { + unsigned int v_total, refresh_hz; + struct dc_stream_state *stream = acrtc->dm_irq_params.stream; + + v_total = stream->adjust.v_total_max ? 
+ stream->adjust.v_total_max : stream->timing.v_total; + refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz * + 100LL, (v_total * stream->timing.h_total)); + mdelay(1000 / refresh_hz); + + drm_writeback_signal_completion(acrtc->wb_conn, 0); + dc_stream_fc_disable_writeback(adev->dm.dc, + acrtc->dm_irq_params.stream, 0); + } + } else + DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__); + acrtc->wb_pending = false; + } + vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); drm_dbg_vbl(adev_to_drm(adev), @@ -724,6 +759,10 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (link && aconnector->dc_link == link) { if (notify->type == DMUB_NOTIFICATION_HPD) @@ -893,8 +932,7 @@ static int dm_early_init(void *handle); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dm_compressor_info *compressor = &adev->dm.compressor; struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); struct drm_display_mode *mode; @@ -948,6 +986,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->audio_inst != port) continue; @@ -988,8 +1030,7 @@ static int amdgpu_dm_audio_component_bind(struct device *kdev, static void amdgpu_dm_audio_component_unbind(struct device *kdev, struct device *hda_kdev, void *data) { - struct drm_device *dev = dev_get_drvdata(kdev); - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev)); struct drm_audio_component *acomp = data; acomp->ops = NULL; @@ -1678,6 +1719,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0]; + if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) + init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; + + init_data.flags.disable_ips_in_vpb = 1; + + /* Enable DWB for tested platforms only */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) + init_data.num_virtual_links = 1; + INIT_LIST_HEAD(&adev->dm.da_list); retrieve_dmi_info(&adev->dm); @@ -1720,23 +1770,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ adev->dm.dc->debug.ignore_cable_id = true; - /* TODO: There is a new drm mst change where the freedom of - * vc_next_start_slot update is revoked/moved into drm, instead of in - * driver. This forces us to make sure to get vc_next_start_slot updated - * in drm function each time without considering if mst_state is active - * or not. Otherwise, next time hotplug will give wrong start_slot - * number. We are implementing a temporary solution to even notify drm - * mst deallocation when link is no longer of MST type when uncommitting - * the stream so we will have more time to work on a proper solution. 
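The writeback branch added to dm_crtc_high_irq() above derives the refresh rate from the stream timing and waits roughly one frame before signaling the writeback job. The same arithmetic in a user-space sketch, with 1080p60-style numbers that are illustrative rather than taken from a real stream:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pix_clk_100hz = 1485000;	/* 148.5 MHz in 100 Hz units */
	unsigned int h_total = 2200, v_total = 1125;

	/* refresh = pixel clock / (h_total * v_total), as in the handler */
	unsigned int refresh_hz = (unsigned int)(pix_clk_100hz * 100ULL /
						 ((uint64_t)v_total * h_total));

	printf("refresh: %u Hz, frame wait: %u ms\n",
	       refresh_hz, 1000 / refresh_hz);	/* 60 Hz, 16 ms */
	return 0;
}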
- * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we - * should notify drm to do a complete "reset" of its states and stop - * calling further drm mst functions when link is no longer of an MST - * type. This could happen when we unplug an MST hubs/displays. When - * uncommit stream comes later after unplug, we should just reset - * hardware states only. - */ - adev->dm.dc->debug.temp_mst_deallocation_sequence = true; - if (adev->dm.dc->caps.dp_hdmi21_pcon_support) DRM_INFO("DP-HDMI FRL PCON supported\n"); @@ -1912,7 +1945,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); - if (adev->dm.hpd_rx_offload_wq) { + if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { for (i = 0; i < adev->dm.dc->caps.max_links; i++) { if (adev->dm.hpd_rx_offload_wq[i].wq) { destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); @@ -2262,6 +2295,10 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { @@ -2390,6 +2427,10 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) @@ -2569,12 +2610,10 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) memset(del_streams, 0, sizeof(del_streams)); - context = dc_create_state(dc); + context = dc_state_create_current_copy(dc); if (context == NULL) goto context_alloc_fail; - dc_resource_state_copy_construct_current(dc, context); - /* First remove from context all streams */ for (i = 0; i < context->stream_count; i++) { struct dc_stream_state *stream = context->streams[i]; @@ -2584,12 +2623,12 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) /* Remove all planes for removed streams and then remove the streams */ for (i = 0; i < del_streams_count; i++) { - if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; goto fail; } - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); + res = dc_state_remove_stream(dc, context, del_streams[i]); if (res != DC_OK) goto fail; } @@ -2597,7 +2636,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) res = dc_commit_streams(dc, context->streams, context->stream_count); fail: - dc_release_state(context); + dc_state_release(context); context_alloc_fail: return res; @@ -2624,7 +2663,7 @@ static int dm_suspend(void *handle) dc_allow_idle_optimizations(adev->dm.dc, false); - dm->cached_dc_state = dc_copy_state(dm->dc->current_state); + dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); @@ -2871,7 +2910,7 @@ static int dm_resume(void *handle) dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); - dc_release_state(dm->cached_dc_state); + dc_state_release(dm->cached_dc_state); dm->cached_dc_state = NULL; amdgpu_dm_irq_resume_late(adev); @@ -2881,10 +2920,9 @@ static 
int dm_resume(void *handle) return 0; } /* Recreate dc_state - DC invalidates it when setting power state to S3. */ - dc_release_state(dm_state->context); - dm_state->context = dc_create_state(dm->dc); + dc_state_release(dm_state->context); + dm_state->context = dc_state_create(dm->dc); /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ - dc_resource_state_construct(dm->dc, dm_state->context); /* Before powering on DC we need to re-initialize DMUB. */ dm_dmub_hw_resume(adev); @@ -2914,6 +2952,10 @@ static int dm_resume(void *handle) /* Do detection*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->dc_link) @@ -3495,6 +3537,9 @@ static void register_hpd_handlers(struct amdgpu_device *adev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); dc_link = aconnector->dc_link; @@ -3957,7 +4002,7 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj) old_state = to_dm_atomic_state(obj->state); if (old_state && old_state->context) - new_state->context = dc_copy_state(old_state->context); + new_state->context = dc_state_create_copy(old_state->context); if (!new_state->context) { kfree(new_state); @@ -3973,7 +4018,7 @@ static void dm_atomic_destroy_state(struct drm_private_obj *obj, struct dm_atomic_state *dm_state = to_dm_atomic_state(state); if (dm_state && dm_state->context) - dc_release_state(dm_state->context); + dc_state_release(dm_state->context); kfree(dm_state); } @@ -4009,14 +4054,12 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) if (!state) return -ENOMEM; - state->context = dc_create_state(adev->dm.dc); + state->context = dc_state_create_current_copy(adev->dm.dc); if (!state->context) { kfree(state); return -ENOMEM; } - dc_resource_state_copy_construct_current(adev->dm.dc, state->context); - drm_atomic_private_obj_init(adev_to_drm(adev), &adev->dm.atomic_obj, &state->base, @@ -4024,14 +4067,19 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) r = amdgpu_display_modeset_create_props(adev); if (r) { - dc_release_state(state->context); + dc_state_release(state->context); kfree(state); return r; } +#ifdef AMD_PRIVATE_COLOR + if (amdgpu_dm_create_color_properties(adev)) + return -ENOMEM; +#endif + r = amdgpu_dm_audio_init(adev); if (r) { - dc_release_state(state->context); + dc_state_release(state->context); kfree(state); return r; } @@ -4467,6 +4515,28 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) continue; } + link = dc_get_link_at_index(dm->dc, i); + + if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { + struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); + + if (!wbcon) { + DRM_ERROR("KMS: Failed to allocate writeback connector\n"); + continue; + } + + if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { + DRM_ERROR("KMS: Failed to initialize writeback connector\n"); + kfree(wbcon); + continue; + } + + link->psr_settings.psr_feature_enabled = false; + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + + continue; + } + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) goto fail; @@ -4485,8 +4555,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } - link = dc_get_link_at_index(dm->dc, i); - 
if (dm->hpd_rx_offload_wq) dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = aconnector; @@ -5089,7 +5157,9 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, * Always set input transfer function, since plane state is refreshed * every time. */ - ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); + ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, + plane_state, + dc_plane_state); if (ret) return ret; @@ -5503,10 +5573,13 @@ static void fill_stream_properties_from_drm_display_mode( { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector = NULL; struct hdmi_vendor_infoframe hv_frame; struct hdmi_avi_infoframe avi_frame; + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + aconnector = to_amdgpu_dm_connector(connector); + memset(&hv_frame, 0, sizeof(hv_frame)); memset(&avi_frame, 0, sizeof(avi_frame)); @@ -5676,13 +5749,13 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, } static struct dc_sink * -create_fake_sink(struct amdgpu_dm_connector *aconnector) +create_fake_sink(struct dc_link *link) { struct dc_sink_init_data sink_init_data = { 0 }; struct dc_sink *sink = NULL; - sink_init_data.link = aconnector->dc_link; - sink_init_data.sink_signal = aconnector->dc_link->connector_signal; + sink_init_data.link = link; + sink_init_data.sink_signal = link->connector_signal; sink = dc_sink_create(&sink_init_data); if (!sink) { @@ -6053,6 +6126,7 @@ create_stream_for_sink(struct drm_connector *connector, enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; struct dsc_dec_dpcd_caps dsc_caps; + struct dc_link *link = NULL; struct dc_sink *sink = NULL; drm_mode_init(&mode, drm_mode); @@ -6066,14 +6140,24 @@ create_stream_for_sink(struct drm_connector *connector, if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { aconnector = NULL; aconnector = to_amdgpu_dm_connector(connector); - if (!aconnector->dc_sink) { - sink = create_fake_sink(aconnector); - if (!sink) - return stream; - } else { - sink = aconnector->dc_sink; - dc_sink_retain(sink); - } + link = aconnector->dc_link; + } else { + struct drm_writeback_connector *wbcon = NULL; + struct amdgpu_dm_wb_connector *dm_wbcon = NULL; + + wbcon = drm_connector_to_writeback(connector); + dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); + link = dm_wbcon->link; + } + + if (!aconnector || !aconnector->dc_sink) { + sink = create_fake_sink(link); + if (!sink) + return stream; + + } else { + sink = aconnector->dc_sink; + dc_sink_retain(sink); } stream = dc_create_stream_for_sink(sink); @@ -6173,19 +6257,16 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // - stream->use_vsc_sdp_for_colorimetry = false; - if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - stream->use_vsc_sdp_for_colorimetry = - aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; - } else { - if 
(stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - stream->use_vsc_sdp_for_colorimetry = true; - } + stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; + if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); @@ -6455,7 +6536,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct edid *edid; struct i2c_adapter *ddc; - if (dc_link->aux_mode) + if (dc_link && dc_link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; else ddc = &aconnector->i2c->base; @@ -6581,7 +6662,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, if (!dc_plane_state) goto cleanup; - dc_state = dc_create_state(dc); + dc_state = dc_state_create(dc); if (!dc_state) goto cleanup; @@ -6608,9 +6689,9 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, dc_result = dc_validate_plane(dc, dc_plane_state); if (dc_result == DC_OK) - dc_result = dc_add_stream_to_ctx(dc, dc_state, stream); + dc_result = dc_state_add_stream(dc, dc_state, stream); - if (dc_result == DC_OK && !dc_add_plane_to_context( + if (dc_result == DC_OK && !dc_state_add_plane( dc, stream, dc_plane_state, @@ -6622,7 +6703,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, cleanup: if (dc_state) - dc_release_state(dc_state); + dc_state_release(dc_state); if (dc_plane_state) dc_plane_state_release(dc_plane_state); @@ -6930,7 +7011,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if (IS_ERR(mst_state)) return PTR_ERR(mst_state); - mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); + mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link)); if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; @@ -6974,6 +7055,9 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->mst_output_port) @@ -7553,7 +7637,6 @@ create_i2c(struct ddc_service *ddc_service, if (!i2c) return NULL; i2c->base.owner = THIS_MODULE; - i2c->base.class = I2C_CLASS_DDC; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); @@ -7579,6 +7662,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct dc_link *link = dc_get_link_at_index(dc, link_index); struct amdgpu_i2c_adapter *i2c; + /* Not needed for writeback connector */ link->priv = aconnector; @@ -8189,6 +8273,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; + bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; + bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func; + bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func; + bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf; } 
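The new surface_updates fields above hand the per-plane color state (hdr_mult, shaper, 3D LUT, blend TF) through to DC. A rough single-channel model of the pre-blending order those fields imply, using a pure 2.2 gamma as a stand-in for the real ROM/LUT curves:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double px = 0.5;		/* encoded plane value in [0, 1] */

	px = pow(px, 2.2);		/* degamma TF: decode to linear */
	px *= 203.0 / 80.0;		/* hdr_mult: e.g. SDR white -> 203 nits */
	/* CTM, shaper TF/LUT and 3D LUT would apply here; identity in this sketch */
	px = pow(px, 1.0 / 2.2);	/* blend TF: re-encode before the blend LUT */
	printf("value entering blending: %f\n", px);
	return 0;
}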
amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, @@ -8402,6 +8490,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, &acrtc_state->stream->csc_color_matrix; bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func; + bundle->stream_update.lut3d_func = + (struct dc_3dlut *) acrtc_state->stream->lut3d_func; + bundle->stream_update.func_shaper = + (struct dc_transfer_func *) acrtc_state->stream->func_shaper; } acrtc_state->stream->abm_level = acrtc_state->abm_level; @@ -8535,6 +8627,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + notify: aconnector = to_amdgpu_dm_connector(connector); @@ -8568,6 +8663,9 @@ notify: if (!status) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); @@ -8593,6 +8691,12 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } +static void dm_clear_writeback(struct amdgpu_display_manager *dm, + struct dm_crtc_state *crtc_state) +{ + dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); +} + static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct dc_state *dc_state) { @@ -8602,9 +8706,38 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + struct drm_connector_state *old_con_state; + struct drm_connector *connector; bool mode_set_reset_required = false; u32 i; + /* Disable writeback */ + for_each_old_connector_in_state(state, connector, old_con_state, i) { + struct dm_connector_state *dm_old_con_state; + struct amdgpu_crtc *acrtc; + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + old_crtc_state = NULL; + + dm_old_con_state = to_dm_connector_state(old_con_state); + if (!dm_old_con_state->base.crtc) + continue; + + acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); + if (acrtc) + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + + if (!acrtc->wb_enabled) + continue; + + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + dm_clear_writeback(dm, dm_old_crtc_state); + acrtc->wb_enabled = false; + } + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); @@ -8729,7 +8862,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, dc_stream_get_status(dm_new_crtc_state->stream); if (!status) - status = dc_stream_get_status_from_state(dc_state, + status = dc_state_get_stream_status(dc_state, dm_new_crtc_state->stream); if (!status) drm_err(dev, @@ -8741,6 +8874,105 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, } } +static void dm_set_writeback(struct amdgpu_display_manager *dm, + struct dm_crtc_state *crtc_state, + struct drm_connector *connector, + struct drm_connector_state *new_con_state) +{ + struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector); + struct amdgpu_device *adev = dm->adev; + struct amdgpu_crtc *acrtc; + struct dc_writeback_info *wb_info; + struct pipe_ctx *pipe = NULL; + struct amdgpu_framebuffer *afb; + int i = 0; + + wb_info = 
kzalloc(sizeof(*wb_info), GFP_KERNEL); + if (!wb_info) { + DRM_ERROR("Failed to allocate wb_info\n"); + return; + } + + acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); + if (!acrtc) { + DRM_ERROR("no amdgpu_crtc found\n"); + kfree(wb_info); + return; + } + + afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); + if (!afb) { + DRM_ERROR("No amdgpu_framebuffer found\n"); + kfree(wb_info); + return; + } + + for (i = 0; i < MAX_PIPES; i++) { + if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) { + pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i]; + break; + } + } + + /* fill in wb_info */ + wb_info->wb_enabled = true; + + wb_info->dwb_pipe_inst = 0; + wb_info->dwb_params.dwbscl_black_color = 0; + wb_info->dwb_params.hdr_mult = 0x1F000; + wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS; + wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13; + wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC; + wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC; + + /* width & height from crtc */ + wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay; + wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay; + wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay; + wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay; + + wb_info->dwb_params.cnv_params.crop_en = false; + wb_info->dwb_params.stereo_params.stereo_enabled = false; + + wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits + wb_info->dwb_params.cnv_params.out_min_pix_val = 0; + wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB; + wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS; + + wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444; + + wb_info->dwb_params.capture_rate = dwb_capture_rate_0; + + wb_info->dwb_params.scaler_taps.h_taps = 4; + wb_info->dwb_params.scaler_taps.v_taps = 4; + wb_info->dwb_params.scaler_taps.h_taps_c = 2; + wb_info->dwb_params.scaler_taps.v_taps_c = 2; + wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; + + wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; + wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1]; + + for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) { + wb_info->mcif_buf_params.luma_address[i] = afb->address; + wb_info->mcif_buf_params.chroma_address[i] = 0; + } + + wb_info->mcif_buf_params.p_vmid = 1; + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) { + wb_info->mcif_warmup_params.start_address.quad_part = afb->address; + wb_info->mcif_warmup_params.region_size = + wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height; + } + wb_info->mcif_warmup_params.p_vmid = 1; + wb_info->writeback_source_plane = pipe->plane_state; + + dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info); + + acrtc->wb_pending = true; + acrtc->wb_conn = wb_conn; + drm_writeback_queue_job(wb_conn, new_con_state); +} + /** * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 
* @state: The atomic state to commit @@ -8768,16 +9000,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); - if (dm->dc->caps.ips_support) { - for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { - if (new_con_state->crtc && - new_con_state->crtc->state->active && - drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) { - dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); - break; - } - } - } + if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dm->dc, false); drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); @@ -8791,7 +9015,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + aconnector = to_amdgpu_dm_connector(connector); if (!adev->dm.hdcp_workqueue) continue; @@ -8975,6 +9204,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * To fix this, DC should permit updating only stream properties. */ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); + if (!dummy_updates) { + DRM_ERROR("Failed to allocate memory for dummy_updates.\n"); + continue; + } for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; @@ -9068,6 +9301,31 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); } + /* Enable writeback */ + for_each_new_connector_in_state(state, connector, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + if (!new_con_state->writeback_job) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + if (!new_crtc_state) + continue; + + if (acrtc->wb_enabled) + continue; + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); + acrtc->wb_enabled = true; + } + /* Update audio instances for each connector. 
*/ amdgpu_dm_commit_audio(dev, state); @@ -9185,10 +9443,15 @@ out: void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector) { - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector; struct amdgpu_crtc *disconnected_acrtc; struct dm_crtc_state *acrtc_state; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_sink || !connector->state || !connector->encoder) return; @@ -9265,12 +9528,16 @@ static void get_freesync_config_for_crtc( struct dm_connector_state *new_con_state) { struct mod_freesync_config config = {0}; - struct amdgpu_dm_connector *aconnector = - to_amdgpu_dm_connector(new_con_state->base.connector); + struct amdgpu_dm_connector *aconnector; struct drm_display_mode *mode = &new_crtc_state->base.mode; int vrefresh = drm_mode_vrefresh(mode); bool fs_vid_mode = false; + if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return; + + aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); + new_crtc_state->vrr_supported = new_con_state->freesync_capable && vrefresh >= aconnector->min_vfreq && vrefresh <= aconnector->max_vfreq; @@ -9516,7 +9783,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, crtc->base.id); /* i.e. reset mode */ - if (dc_remove_stream_from_ctx( + if (dc_state_remove_stream( dm->dc, dm_state->context, dm_old_crtc_state->stream) != DC_OK) { @@ -9559,7 +9826,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", crtc->base.id); - if (dc_add_stream_to_ctx( + if (dc_state_add_stream( dm->dc, dm_state->context, dm_new_crtc_state->stream) != DC_OK) { @@ -9606,6 +9873,7 @@ skip_modeset: * when a modeset is needed, to ensure it gets reprogrammed. */ if (dm_new_crtc_state->base.color_mgmt_changed || + dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); if (ret) @@ -9639,7 +9907,8 @@ static bool should_reset_plane(struct drm_atomic_state *state, * TODO: Remove this hack for all asics once it proves that the * fast updates works fine on DCN3.2+. */ - if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset) + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) && + state->allow_modeset) return true; /* Exit early if we know that we're adding or removing the plane. */ @@ -9673,6 +9942,10 @@ static bool should_reset_plane(struct drm_atomic_state *state, */ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { struct amdgpu_framebuffer *old_afb, *new_afb; + struct dm_plane_state *dm_new_other_state, *dm_old_other_state; + + dm_new_other_state = to_dm_plane_state(new_other_state); + dm_old_other_state = to_dm_plane_state(old_other_state); if (other->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -9709,6 +9982,18 @@ static bool should_reset_plane(struct drm_atomic_state *state, old_other_state->color_encoding != new_other_state->color_encoding) return true; + /* HDR/Transfer Function changes. 
*/ + if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || + dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || + dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || + dm_old_other_state->ctm != dm_new_other_state->ctm || + dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || + dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || + dm_old_other_state->lut3d != dm_new_other_state->lut3d || + dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || + dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) + return true; + /* Framebuffer checks fall at the end. */ if (!old_other_state->fb || !new_other_state->fb) continue; @@ -9863,7 +10148,7 @@ static int dm_update_plane_state(struct dc *dc, if (ret) return ret; - if (!dc_remove_plane_from_context( + if (!dc_state_remove_plane( dc, dm_old_crtc_state->stream, dm_old_plane_state->dc_state, @@ -9941,7 +10226,7 @@ static int dm_update_plane_state(struct dc *dc, * state. It'll be released when the atomic state is * cleaned. */ - if (!dc_add_plane_to_context( + if (!dc_state_add_plane( dc, dm_new_crtc_state->stream, dc_new_plane_state, @@ -10103,6 +10388,9 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm if (conn_state->crtc != crtc) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->mst_output_port || !aconnector->mst_root) aconnector = NULL; @@ -10815,8 +11103,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 3710f4d0f2..9c1871b866 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -32,6 +32,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_plane.h> #include "link_service_types.h" +#include <drm/drm_writeback.h> /* * This file contains the definition for amdgpu_display_manager @@ -54,6 +55,9 @@ #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3 + +#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL) + /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -714,11 +718,107 @@ static inline void amdgpu_dm_set_mst_status(uint8_t *status, #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) +struct amdgpu_dm_wb_connector { + struct drm_writeback_connector base; + struct dc_link *link; +}; + +#define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base) + extern const struct amdgpu_ip_block_version dm_ip_block; +/* enum amdgpu_transfer_function: pre-defined transfer function supported by AMD. + * + * It includes standardized transfer functions and pure power functions. 
The + * transfer function coefficients are available at modules/color/color_gamma.c + */ +enum amdgpu_transfer_function { + AMDGPU_TRANSFER_FUNCTION_DEFAULT, + AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF, + AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF, + AMDGPU_TRANSFER_FUNCTION_PQ_EOTF, + AMDGPU_TRANSFER_FUNCTION_IDENTITY, + AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF, + AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_BT709_OETF, + AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_COUNT +}; + struct dm_plane_state { struct drm_plane_state base; struct dc_plane_state *dc_state; + + /* Plane color mgmt */ + /** + * @degamma_lut: + * + * 1D LUT for mapping framebuffer/plane pixel data before sampling or + * blending operations. It's usually applied to linearize input space. + * The blob (if not NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *degamma_lut; + /** + * @degamma_tf: + * + * Predefined transfer function to tell DC driver the input space to + * linearize. + */ + enum amdgpu_transfer_function degamma_tf; + /** + * @hdr_mult: + * + * Multiplier to 'gain' the plane. When PQ is decoded using the fixed + * func transfer function to the internal FP16 fb, 1.0 -> 80 nits (on + * AMD at least). When sRGB is decoded, 1.0 -> 1.0, obviously. + * Therefore, 1.0 multiplier = 80 nits for SDR content. So if you + * want, 203 nits for SDR content, pass in (203.0 / 80.0). Format is + * S31.32 sign-magnitude. + * + * HDR multiplier can wide range beyond [0.0, 1.0]. This means that PQ + * TF is needed for any subsequent linear-to-non-linear transforms. + */ + __u64 hdr_mult; + /** + * @ctm: + * + * Color transformation matrix. The blob (if not NULL) is a &struct + * drm_color_ctm_3x4. + */ + struct drm_property_blob *ctm; + /** + * @shaper_lut: shaper lookup table blob. The blob (if not NULL) is an + * array of &struct drm_color_lut. + */ + struct drm_property_blob *shaper_lut; + /** + * @shaper_tf: + * + * Predefined transfer function to delinearize color space. + */ + enum amdgpu_transfer_function shaper_tf; + /** + * @lut3d: 3D lookup table blob. The blob (if not NULL) is an array of + * &struct drm_color_lut. + */ + struct drm_property_blob *lut3d; + /** + * @blend_lut: blend lut lookup table blob. The blob (if not NULL) is an + * array of &struct drm_color_lut. + */ + struct drm_property_blob *blend_lut; + /** + * @blend_tf: + * + * Pre-defined transfer function for converting plane pixel data before + * applying blend LUT. + */ + enum amdgpu_transfer_function blend_tf; }; struct dm_crtc_state { @@ -743,6 +843,14 @@ struct dm_crtc_state { struct dc_info_packet vrr_infopacket; int abm_level; + + /** + * @regamma_tf: + * + * Pre-defined transfer function for converting internal FB -> wire + * encoding. 
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) @@ -804,14 +912,22 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); +/* 3D LUT max size is 17x17x17 (4913 entries) */ +#define MAX_COLOR_3DLUT_SIZE 17 +#define MAX_COLOR_3DLUT_BITDEPTH 12 +int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev, + struct drm_plane_state *plane_state); +/* 1D LUT size */ #define MAX_COLOR_LUT_ENTRIES 4096 /* Legacy gamm LUT users such as X doesn't like large LUT sizes */ #define MAX_COLOR_LEGACY_LUT_ENTRIES 256 void amdgpu_dm_init_color_mod(void); +int amdgpu_dm_create_color_properties(struct amdgpu_device *adev); int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state); int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct drm_plane_state *plane_state, struct dc_plane_state *dc_plane_state); void amdgpu_dm_update_connector_after_detect( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a4cb23d059..c87b64e464 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -72,6 +72,7 @@ */ #define MAX_DRM_LUT_VALUE 0xFFFF +#define SDR_WHITE_LEVEL_INIT_VALUE 80 /** * amdgpu_dm_init_color_mod - Initialize the color module. @@ -84,6 +85,247 @@ void amdgpu_dm_init_color_mod(void) { setup_x_points_distribution(); } +static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x) +{ + struct fixed31_32 val; + + /* If negative, convert to 2's complement. */ + if (x & (1ULL << 63)) + x = -(x & ~(1ULL << 63)); + + val.value = x; + return val; +} + +#ifdef AMD_PRIVATE_COLOR +/* Pre-defined Transfer Functions (TF) + * + * AMD driver supports pre-defined mathematical functions for transferring + * between encoded values and optical/linear space. Depending on HW color caps, + * ROMs and curves built by the AMD color module support these transforms. + * + * The driver-specific color implementation exposes properties for pre-blending + * degamma TF, shaper TF (before 3D LUT), and blend(dpp.ogam) TF and + * post-blending regamma (mpc.ogam) TF. However, only pre-blending degamma + * supports ROM curves. AMD color module uses pre-defined coefficients to build + * curves for the other blocks. What can be done by each color block is + * described by struct dpp_color_caps and struct mpc_color_caps. + * + * AMD driver-specific color API exposes the following pre-defined transfer + * functions: + * + * - Identity: linear/identity relationship between pixel value and + * luminance value; + * - Gamma 2.2, Gamma 2.4, Gamma 2.6: pure power functions; + * - sRGB: 2.4: The piece-wise transfer function from IEC 61966-2-1:1999; + * - BT.709: has a linear segment in the bottom part and then a power function + * with a 0.45 (~1/2.22) gamma for the rest of the range; standardized by + * ITU-R BT.709-6; + * - PQ (Perceptual Quantizer): used for HDR display, allows luminance range + * capability of 0 to 10,000 nits; standardized by SMPTE ST 2084. + * + * The AMD color model is designed with an assumption that SDR (sRGB, BT.709, + * Gamma 2.2, etc.) peak white maps (normalized to 1.0 FP) to 80 nits in the PQ + * system.
This has the implication that PQ EOTF (non-linear to linear) maps to + * [0.0..125.0] where 125.0 = 10,000 nits / 80 nits. + * + * Non-linear and linear forms are described in the table below: + * + * ┌───────────┬─────────────────────┬──────────────────────┐ + * │ │ Non-linear │ Linear │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ sRGB │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ BT709 │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ Gamma 2.x │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ PQ │ UNORM or FP16 CCCS* │ [0.0, 125.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ Identity │ UNORM or FP16 CCCS* │ [0.0, 1.0] or CCCS** │ + * └───────────┴─────────────────────┴──────────────────────┘ + * * CCCS: Windows canonical composition color space + * ** Respectively + * + * In the driver-specific API, color block names attached to TF properties + * suggest the intention regarding non-linear encoding pixel's luminance + * values. As some newer encodings don't use gamma curve, we make encoding and + * decoding explicit by defining an enum list of transfer functions supported + * in terms of EOTF and inverse EOTF, where: + * + * - EOTF (electro-optical transfer function): is the transfer function to go + * from the encoded value to an optical (linear) value. De-gamma functions + * traditionally do this. + * - Inverse EOTF (simply the inverse of the EOTF): is usually intended to go + * from an optical/linear space (which might have been used for blending) + * back to the encoded values. Gamma functions traditionally do this. + */ +static const char * const +amdgpu_transfer_function_names[] = { + [AMDGPU_TRANSFER_FUNCTION_DEFAULT] = "Default", + [AMDGPU_TRANSFER_FUNCTION_IDENTITY] = "Identity", + [AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF] = "sRGB EOTF", + [AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF] = "BT.709 inv_OETF", + [AMDGPU_TRANSFER_FUNCTION_PQ_EOTF] = "PQ EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF] = "Gamma 2.2 EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF] = "Gamma 2.4 EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF] = "Gamma 2.6 EOTF", + [AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF] = "sRGB inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_BT709_OETF] = "BT.709 OETF", + [AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF] = "PQ inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF] = "Gamma 2.2 inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF] = "Gamma 2.4 inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF] = "Gamma 2.6 inv_EOTF", +}; + +static const u32 amdgpu_eotf = + BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF) | + BIT(AMDGPU_TRANSFER_FUNCTION_PQ_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF); + +static const u32 amdgpu_inv_eotf = + BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_BT709_OETF) | + BIT(AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF); + +static struct drm_property * +amdgpu_create_tf_property(struct drm_device *dev, + const char *name, + u32 supported_tf) +{ + u32 transfer_functions = supported_tf | + BIT(AMDGPU_TRANSFER_FUNCTION_DEFAULT) | + 
BIT(AMDGPU_TRANSFER_FUNCTION_IDENTITY); + struct drm_prop_enum_list enum_list[AMDGPU_TRANSFER_FUNCTION_COUNT]; + int i, len; + + len = 0; + for (i = 0; i < AMDGPU_TRANSFER_FUNCTION_COUNT; i++) { + if ((transfer_functions & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = amdgpu_transfer_function_names[i]; + len++; + } + + return drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + name, enum_list, len); +} + +int +amdgpu_dm_create_color_properties(struct amdgpu_device *adev) +{ + struct drm_property *prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_DEGAMMA_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_DEGAMMA_LUT_SIZE", + 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_DEGAMMA_TF", + amdgpu_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_tf_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + 0, "AMD_PLANE_HDR_MULT", 0, U64_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_hdr_mult_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_CTM", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_ctm_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_SHAPER_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_SHAPER_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_SHAPER_TF", + amdgpu_inv_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_tf_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_LUT3D", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_lut3d_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_LUT3D_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_lut3d_size_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_BLEND_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_BLEND_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_BLEND_TF", + amdgpu_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_tf_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_CRTC_REGAMMA_TF", + amdgpu_inv_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.regamma_tf_property = prop; + + return 0; +} +#endif + /** * __extract_blob_lut - Extracts the DRM lut and lut size from a blob. 
* @blob: DRM color mgmt property blob @@ -182,7 +424,6 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, struct fixed31_32 *matrix) { - int64_t val; int i; /* @@ -201,12 +442,29 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, } /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ - val = ctm->matrix[i - (i / 4)]; - /* If negative, convert to 2's complement. */ - if (val & (1ULL << 63)) - val = -(val & ~(1ULL << 63)); + matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i - (i / 4)]); + } +} - matrix[i].value = val; +/** + * __drm_ctm_3x4_to_dc_matrix - converts a DRM CTM 3x4 to a DC CSC float matrix + * @ctm: DRM color transformation matrix with 3x4 dimensions + * @matrix: DC CSC float matrix + * + * The matrix needs to be a 3x4 (12 entry) matrix. + */ +static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm, + struct fixed31_32 *matrix) +{ + int i; + + /* The format provided is S31.32, using signed-magnitude representation. + * Our fixed31_32 is also S31.32, but is using 2's complement. We have + * to convert from signed-magnitude to 2's complement. + */ + for (i = 0; i < 12; i++) { + /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ + matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]); } } @@ -268,16 +526,18 @@ static int __set_output_tf(struct dc_transfer_func *func, struct calculate_buffer cal_buffer = {0}; bool res; - ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); - cal_buffer.buffer_index = -1; - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; + if (lut_size) { + ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); - gamma->num_entries = lut_size; - __drm_lut_to_dc_gamma(lut, gamma, false); + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->num_entries = lut_size; + __drm_lut_to_dc_gamma(lut, gamma, false); + } if (func->tf == TRANSFER_FUNCTION_LINEAR) { /* @@ -285,27 +545,68 @@ static int __set_output_tf(struct dc_transfer_func *func, * on top of a linear input. But degamma params can be used * instead to simulate this. */ - gamma->type = GAMMA_CUSTOM; + if (gamma) + gamma->type = GAMMA_CUSTOM; res = mod_color_calculate_degamma_params(NULL, func, - gamma, true); + gamma, gamma != NULL); } else { /* * Assume sRGB. The actual mapping will depend on whether the * input was legacy or not. */ - gamma->type = GAMMA_CS_TFM_1D; - res = mod_color_calculate_regamma_params(func, gamma, false, + if (gamma) + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, has_rom, NULL, &cal_buffer); } - dc_gamma_release(&gamma); + if (gamma) + dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } +static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream, + const struct drm_color_lut *regamma_lut, + uint32_t regamma_size, bool has_rom, + enum dc_transfer_func_predefined tf) +{ + struct dc_transfer_func *out_tf = stream->out_transfer_func; + int ret = 0; + + if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * CRTC RGM goes into RGM LUT. + * + * Note: there is no implicit sRGB regamma here. We are using + * degamma calculation from color module to calculate the curve + * from a linear base if gamma TF is not set. However, if gamma + * TF (!= Linear) and LUT are set at the same time, we will use + * regamma calculation, and the color module will combine the + * pre-defined TF and the custom LUT values into the LUT that's + * actually programmed. 
+ */ + out_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + out_tf->tf = tf; + out_tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_output_tf(out_tf, regamma_lut, regamma_size, has_rom); + } else { + /* + * No CRTC RGM means we can just put the block into bypass + * since we don't have any plane level adjustments using it. + */ + out_tf->type = TF_TYPE_BYPASS; + out_tf->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + /** * __set_input_tf - calculates the input transfer function based on expected * input space. + * @caps: dc color capabilities * @func: transfer function * @lut: lookup table that defines the color space * @lut_size: size of respective lut. @@ -313,27 +614,240 @@ static int __set_output_tf(struct dc_transfer_func *func, * Returns: * 0 in case of success. -ENOMEM if fails. */ -static int __set_input_tf(struct dc_transfer_func *func, +static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size) { struct dc_gamma *gamma = NULL; bool res; - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; - gamma->type = GAMMA_CUSTOM; - gamma->num_entries = lut_size; + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; + + __drm_lut_to_dc_gamma(lut, gamma, false); + } - __drm_lut_to_dc_gamma(lut, gamma, false); + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); - res = mod_color_calculate_degamma_params(NULL, func, gamma, true); - dc_gamma_release(&gamma); + if (gamma) + dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } +static enum dc_transfer_func_predefined +amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) +{ + switch (tf) { + default: + case AMDGPU_TRANSFER_FUNCTION_DEFAULT: + case AMDGPU_TRANSFER_FUNCTION_IDENTITY: + return TRANSFER_FUNCTION_LINEAR; + case AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF: + case AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF: + return TRANSFER_FUNCTION_SRGB; + case AMDGPU_TRANSFER_FUNCTION_BT709_OETF: + case AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF: + return TRANSFER_FUNCTION_BT709; + case AMDGPU_TRANSFER_FUNCTION_PQ_EOTF: + case AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF: + return TRANSFER_FUNCTION_PQ; + case AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA22; + case AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA24; + case AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA26; + } +} + +static void __to_dc_lut3d_color(struct dc_rgb *rgb, + const struct drm_color_lut lut, + int bit_precision) +{ + rgb->red = drm_color_lut_extract(lut.red, bit_precision); + rgb->green = drm_color_lut_extract(lut.green, bit_precision); + rgb->blue = drm_color_lut_extract(lut.blue, bit_precision); +} + +static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut, + uint32_t lut3d_size, + struct tetrahedral_params *params, + bool use_tetrahedral_9, + int bit_depth) +{ + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_i, i; + + + if (use_tetrahedral_9) { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + } else { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = 
params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ }
+
+ for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
+ /*
+ * The 3D LUT RGB values are distributed across four arrays,
+ * lut0-3, where the first has 1229 entries and the others
+ * 1228 each. The bit depth supported for the 3D LUT channel
+ * is 12-bit, but DC also supports 10-bit.
+ *
+ * TODO: improve the color pipeline API so that userspace can
+ * set the bit depth and 3D LUT size/stride, as specified by
+ * VA-API.
+ */
+ __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
+ __to_dc_lut3d_color(&lut1[lut_i], lut[i + 1], bit_depth);
+ __to_dc_lut3d_color(&lut2[lut_i], lut[i + 2], bit_depth);
+ __to_dc_lut3d_color(&lut3[lut_i], lut[i + 3], bit_depth);
+ }
+ /* lut0 has 1229 points (lut_size/4 + 1) */
+ __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
+}
+
+/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream
+ * @drm_lut3d: user 3D LUT
+ * @drm_lut3d_size: size of 3D LUT
+ * @lut3d: DC 3D LUT
+ *
+ * Map user 3D LUT data to the DC 3D LUT and set all necessary bits to
+ * program it on DCN accordingly.
+ */
+static void amdgpu_dm_atomic_lut3d(const struct drm_color_lut *drm_lut3d,
+ uint32_t drm_lut3d_size,
+ struct dc_3dlut *lut)
+{
+ if (!drm_lut3d_size) {
+ lut->state.bits.initialized = 0;
+ } else {
+ /* Stride and bit depth are not programmable by API yet.
+ * Therefore, only a 17x17x17 3D LUT (12-bit) is supported.
+ */
+ lut->lut_3d.use_tetrahedral_9 = false;
+ lut->lut_3d.use_12bits = true;
+ lut->state.bits.initialized = 1;
+ __drm_3dlut_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
+ lut->lut_3d.use_tetrahedral_9,
+ MAX_COLOR_3DLUT_BITDEPTH);
+ }
+}
+
+static int amdgpu_dm_atomic_shaper_lut(const struct drm_color_lut *shaper_lut,
+ bool has_rom,
+ enum dc_transfer_func_predefined tf,
+ uint32_t shaper_size,
+ struct dc_transfer_func *func_shaper)
+{
+ int ret = 0;
+
+ if (shaper_size || tf != TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * If a user shaper LUT is set, we assume a linear color space
+ * (linearized by a degamma 1D LUT or not).
+ */
+ func_shaper->type = TF_TYPE_DISTRIBUTED_POINTS;
+ func_shaper->tf = tf;
+ func_shaper->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+
+ ret = __set_output_tf(func_shaper, shaper_lut, shaper_size, has_rom);
+ } else {
+ func_shaper->type = TF_TYPE_BYPASS;
+ func_shaper->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return ret;
+}
+
+static int amdgpu_dm_atomic_blend_lut(const struct drm_color_lut *blend_lut,
+ bool has_rom,
+ enum dc_transfer_func_predefined tf,
+ uint32_t blend_size,
+ struct dc_transfer_func *func_blend)
+{
+ int ret = 0;
+
+ if (blend_size || tf != TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * A DRM plane gamma LUT or TF means we are linearizing color
+ * space before blending (similar to degamma programming). As
+ * we don't have hardcoded curve support, we use the AMD color
+ * module to fill the parameters that will be translated to HW
+ * points.
+ */
+ func_blend->type = TF_TYPE_DISTRIBUTED_POINTS;
+ func_blend->tf = tf;
+ func_blend->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+
+ ret = __set_input_tf(NULL, func_blend, blend_lut, blend_size);
+ } else {
+ func_blend->type = TF_TYPE_BYPASS;
+ func_blend->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return ret;
+}
+
+/**
+ * amdgpu_dm_verify_lut3d_size - verifies if 3D LUT is supported and if user
+ * shaper and 3D LUTs match the hw supported size
+ * @adev: amdgpu device
+ * @plane_state: the DRM plane state
+ *
+ * Verifies if pre-blending (DPP) 3D LUT is supported by the HW (DCN 2.0 or
+ * newer) and if the user shaper and 3D LUTs match the supported size.
+ *
+ * Returns:
+ * 0 on success. -EINVAL if LUT sizes are invalid.
+ */
+int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
+ struct drm_plane_state *plane_state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
+ uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+
+ /* The shaper LUT is only available if there are 3D LUT color caps */
+ exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
+ shaper = __extract_blob_lut(dm_plane_state->shaper_lut, &size);
+
+ if (shaper && size != exp_size) {
+ drm_dbg(&adev->ddev,
+ "Invalid Shaper LUT size. Should be %u but got %u.\n",
+ exp_size, size);
+ return -EINVAL;
+ }
+
+ /* The number of 3D LUT entries is the dimension size cubed */
+ exp_size = has_3dlut ? dim_size * dim_size * dim_size : 0;
+ lut3d = __extract_blob_lut(dm_plane_state->lut3d, &size);
+
+ if (lut3d && size != exp_size) {
+ drm_dbg(&adev->ddev,
+ "Invalid 3D LUT size. Should be %u but got %u.\n",
+ exp_size, size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
 /**
 * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes
 * @crtc_state: the DRM CRTC state
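For concreteness, the size checks above work out as follows for the 17x17x17 mode described earlier. A standalone sketch: the value 17 for MAX_COLOR_3DLUT_SIZE follows from the comments in this patch, while 4096 for MAX_COLOR_LUT_ENTRIES is an illustrative assumption, not taken from this diff.

#include <stdio.h>

int main(void)
{
	/* Dimension of the only supported 3D LUT mode (17x17x17), per the
	 * comments above; 4096 shaper entries is an assumed value. */
	unsigned int dim_size = 17;
	unsigned int shaper_entries = 4096;
	unsigned int lut3d_entries = dim_size * dim_size * dim_size; /* 4913 */

	/* DC's tetrahedral layout spreads those points over four arrays:
	 * lut0 holds lut3d_entries / 4 + 1 = 1229 points and lut1..lut3
	 * hold 1228 each, matching the __drm_3dlut_to_dc_3dlut() comment. */
	unsigned int lut0_points = lut3d_entries / 4 + 1;	/* 1229 */
	unsigned int lutn_points = lut3d_entries / 4;		/* 1228 */

	printf("expected shaper blob entries: %u\n", shaper_entries);
	printf("expected 3D LUT blob entries: %u (= %u + 3 * %u)\n",
	       lut3d_entries, lut0_points, lutn_points);
	return 0;
}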
@@ -401,9 +915,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
 const struct drm_color_lut *degamma_lut, *regamma_lut;
 uint32_t degamma_size, regamma_size;
 bool has_regamma, has_degamma;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_LINEAR;
 bool is_legacy;
 int r;
 
+ tf = amdgpu_tf_to_dc_tf(crtc->regamma_tf);
+
 r = amdgpu_dm_verify_lut_sizes(&crtc->base);
 if (r)
 return r;
@@ -439,27 +956,23 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
 crtc->cm_is_degamma_srgb = true;
 stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-
+ /*
+ * Note: although we pass has_rom as a parameter here, we never
+ * actually use ROM because the color module only takes the ROM
+ * path if transfer_func->type == PREDEFINED.
+ *
+ * See more in mod_color_calculate_regamma_params()
+ */
 r = __set_legacy_tf(stream->out_transfer_func, regamma_lut,
 regamma_size, has_rom);
 if (r)
 return r;
- } else if (has_regamma) {
- /* If atomic regamma, CRTC RGM goes into RGM LUT. */
- stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
-
- r = __set_output_tf(stream->out_transfer_func, regamma_lut,
- regamma_size, has_rom);
+ } else {
+ regamma_size = has_regamma ? regamma_size : 0;
+ r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ regamma_size, has_rom, tf);
 if (r)
 return r;
- } else {
- /*
- * No CRTC RGM means we can just put the block into bypass
- * since we don't have any plane level adjustments using it.
- */
- stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
 }
 
 /*
@@ -495,20 +1008,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
 return 0;
 }
 
-/**
- * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
- * @crtc: amdgpu_dm crtc state
- * @dc_plane_state: target DC surface
- *
- * Update the underlying dc_stream_state's input transfer function (ITF) in
- * preparation for hardware commit. The transfer function used depends on
- * the preparation done on the stream for color management.
- *
- * Returns:
- * 0 on success. -ENOMEM if mem allocation fails.
- */
-int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
- struct dc_plane_state *dc_plane_state)
+static int
+map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
+ struct dc_plane_state *dc_plane_state,
+ struct dc_color_caps *caps)
 {
 const struct drm_color_lut *degamma_lut;
 enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
@@ -531,8 +1034,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
 &degamma_size);
 ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
 
- dc_plane_state->in_transfer_func->type =
- TF_TYPE_DISTRIBUTED_POINTS;
+ dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
 
 /*
 * This case isn't fully correct, but also fairly
@@ -564,11 +1066,11 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
 dc_plane_state->in_transfer_func->tf =
 TRANSFER_FUNCTION_LINEAR;
 
- r = __set_input_tf(dc_plane_state->in_transfer_func,
+ r = __set_input_tf(caps, dc_plane_state->in_transfer_func,
 degamma_lut, degamma_size);
 if (r)
 return r;
- } else if (crtc->cm_is_degamma_srgb) {
+ } else {
 /*
 * For legacy gamma support we need the regamma input
 * in linear space. Assume that the input is sRGB.
@@ -577,14 +1079,209 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
 dc_plane_state->in_transfer_func->tf = tf;
 
 if (tf != TRANSFER_FUNCTION_SRGB &&
- !mod_color_calculate_degamma_params(NULL,
- dc_plane_state->in_transfer_func, NULL, false))
+ !mod_color_calculate_degamma_params(caps,
+ dc_plane_state->in_transfer_func,
+ NULL, false))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_degamma(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct dc_color_caps *color_caps)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ const struct drm_color_lut *degamma_lut;
+ enum amdgpu_transfer_function tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ uint32_t degamma_size;
+ bool has_degamma_lut;
+ int ret;
+
+ degamma_lut = __extract_blob_lut(dm_plane_state->degamma_lut,
+ &degamma_size);
+
+ has_degamma_lut = degamma_lut &&
+ !__is_lut_linear(degamma_lut, degamma_size);
+
+ tf = dm_plane_state->degamma_tf;
+
+ /* If we have neither a plane degamma LUT nor a TF to set on DC, we
+ * have nothing to do here, so return.
+ */ + if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT) + return -EINVAL; + + dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf); + + if (has_degamma_lut) { + ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); + + dc_plane_state->in_transfer_func->type = + TF_TYPE_DISTRIBUTED_POINTS; + + ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func, + degamma_lut, degamma_size); + if (ret) + return ret; + } else { + dc_plane_state->in_transfer_func->type = + TF_TYPE_PREDEFINED; + + if (!mod_color_calculate_degamma_params(color_caps, + dc_plane_state->in_transfer_func, NULL, false)) return -ENOMEM; - } else { - /* ...Otherwise we can just bypass the DGM block. */ - dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; + } + return 0; +} + +static int +amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + enum amdgpu_transfer_function shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + enum amdgpu_transfer_function blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + const struct drm_color_lut *shaper_lut, *lut3d, *blend_lut; + uint32_t shaper_size, lut3d_size, blend_size; + int ret; + + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult); + + shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size); + shaper_size = shaper_lut != NULL ? shaper_size : 0; + shaper_tf = dm_plane_state->shaper_tf; + lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size); + lut3d_size = lut3d != NULL ? lut3d_size : 0; + + amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func); + ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false, + amdgpu_tf_to_dc_tf(shaper_tf), + shaper_size, + dc_plane_state->in_shaper_func); + if (ret) { + drm_dbg_kms(plane_state->plane->dev, + "setting plane %d shaper LUT failed.\n", + plane_state->plane->index); + + return ret; + } + + blend_tf = dm_plane_state->blend_tf; + blend_lut = __extract_blob_lut(dm_plane_state->blend_lut, &blend_size); + blend_size = blend_lut != NULL ? blend_size : 0; + + ret = amdgpu_dm_atomic_blend_lut(blend_lut, false, + amdgpu_tf_to_dc_tf(blend_tf), + blend_size, dc_plane_state->blend_tf); + if (ret) { + drm_dbg_kms(plane_state->plane->dev, + "setting plane %d gamma lut failed.\n", + plane_state->plane->index); + + return ret; } return 0; } + +/** + * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. + * @crtc: amdgpu_dm crtc state + * @plane_state: DRM plane state + * @dc_plane_state: target DC surface + * + * Update the underlying dc_stream_state's input transfer function (ITF) in + * preparation for hardware commit. The transfer function used depends on + * the preparation done on the stream for color management. + * + * Returns: + * 0 on success. -ENOMEM if mem allocation fails. 
+ */
+int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
+ struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ struct drm_color_ctm_3x4 *ctm = NULL;
+ struct dc_color_caps *color_caps = NULL;
+ bool has_crtc_cm_degamma;
+ int ret;
+
+ ret = amdgpu_dm_verify_lut3d_size(adev, plane_state);
+ if (ret) {
+ drm_dbg_driver(&adev->ddev, "amdgpu_dm_verify_lut3d_size() failed\n");
+ return ret;
+ }
+
+ if (dc_plane_state->ctx && dc_plane_state->ctx->dc)
+ color_caps = &dc_plane_state->ctx->dc->caps.color;
+
+ /* Initially, we can just bypass the DGM block. */
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+
+ /* After, we start to update values according to color props */
+ has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb);
+
+ ret = __set_dm_plane_degamma(plane_state, dc_plane_state, color_caps);
+ if (ret == -ENOMEM)
+ return ret;
+
+ /* We only have one degamma block available (pre-blending) for the
+ * whole color correction pipeline, so we can't actually perform
+ * plane and CRTC degamma at the same time. Explicitly reject atomic
+ * updates when userspace sets both plane and CRTC degamma properties.
+ */
+ if (has_crtc_cm_degamma && ret != -EINVAL) {
+ drm_dbg_kms(crtc->base.crtc->dev,
+ "doesn't support plane and CRTC degamma at the same time\n");
+ return -EINVAL;
+ }
+
+ /* If we are here, it means we don't have plane degamma settings; check
+ * whether we have CRTC degamma waiting to be mapped to the pre-blending
+ * degamma block
+ */
+ if (has_crtc_cm_degamma) {
+ /*
+ * AMD HW doesn't have post-blending degamma caps. When DRM
+ * CRTC atomic degamma is set, we map it to the DPP degamma
+ * block (pre-blending) or, on legacy gamma, we use DPP degamma
+ * to linearize (implicit degamma) from sRGB/BT709 according to
+ * the input space.
+ */
+ ret = map_crtc_degamma_to_dc_plane(crtc, dc_plane_state, color_caps);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup CRTC CTM. */
+ if (dm_plane_state->ctm) {
+ ctm = (struct drm_color_ctm_3x4 *)dm_plane_state->ctm->data;
+ /*
+ * DCN2 and older don't support both pre-blending and
+ * post-blending gamut remap. For this HW family, if we have
+ * the plane and CRTC CTMs simultaneously, CRTC CTM takes
+ * priority, and we discard plane CTM, as implemented in
+ * dcn10_program_gamut_remap(). However, DCN3+ has DPP
+ * (pre-blending) and MPC (post-blending) `gamut remap` blocks;
+ * therefore, we can program plane and CRTC CTMs together by
+ * mapping CRTC CTM to MPC and keeping plane CTM setup at DPP,
+ * as it's done by dcn30_program_gamut_remap().
+ */
+ __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
+
+ dc_plane_state->gamut_remap_matrix.enable_remap = true;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ } else {
+ /* Bypass CTM. */
+ dc_plane_state->gamut_remap_matrix.enable_remap = false;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ }
+
+ return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
+}
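For reference: amdgpu_dm_fixpt_from_s3132() is introduced elsewhere in this series and its definition is not shown in this diff. A minimal standalone sketch of the conversion it plausibly performs, mirroring the open-coded signed-magnitude handling that this patch removes from __drm_ctm_to_dc_matrix() above (the struct redefinition and function name here are illustrative only):

/* DC's struct fixed31_32 stores a 2's-complement S31.32 value, while DRM
 * CTM entries are S31.32 in signed-magnitude form (sign in bit 63). */
struct fixed31_32 { long long value; };

static inline struct fixed31_32 example_fixpt_from_s3132(unsigned long long x)
{
	struct fixed31_32 val;

	if (x & (1ULL << 63))		/* sign bit set: negative magnitude */
		val.value = -(long long)(x & ~(1ULL << 63));
	else
		val.value = (long long)x;

	return val;
}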
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 52ecfa746b..f936a35fa9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -326,6 +326,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
 if (!connector->state || connector->state->crtc != crtc)
 continue;
 
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
 aconn = to_amdgpu_dm_connector(connector);
 break;
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index d2834ad85a..6e715ef3a5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -253,6 +253,7 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr
 state->freesync_config = cur->freesync_config;
 state->cm_has_degamma = cur->cm_has_degamma;
 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+ state->regamma_tf = cur->regamma_tf;
 state->crc_skip_count = cur->crc_skip_count;
 state->mpo_requested = cur->mpo_requested;
 /* TODO Duplicate dc_stream after objects are stream object is flattened */
@@ -289,6 +290,70 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
 }
 #endif
+#ifdef AMD_PRIVATE_COLOR
+/**
+ * dm_crtc_additional_color_mgmt - enable additional color properties
+ * @crtc: DRM CRTC
+ *
+ * This function lets the driver enable the post-blending CRTC regamma
+ * transfer function property in addition to the DRM CRTC gamma LUT. The
+ * default value means a linear transfer function, which is the default CRTC
+ * gamma LUT behaviour without this property.
+ */
+static void
+dm_crtc_additional_color_mgmt(struct drm_crtc *crtc)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+
+ if (adev->dm.dc->caps.color.mpc.ogam_ram)
+ drm_object_attach_property(&crtc->base,
+ adev->mode_info.regamma_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+}
+
+static int
+amdgpu_dm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state);
+
+ if (property == adev->mode_info.regamma_tf_property) {
+ if (acrtc_state->regamma_tf != val) {
+ acrtc_state->regamma_tf = val;
+ acrtc_state->base.color_mgmt_changed |= 1;
+ }
+ } else {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+amdgpu_dm_atomic_crtc_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state);
+
+ if (property == adev->mode_info.regamma_tf_property)
+ *val = acrtc_state->regamma_tf;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
 /* Implemented only the options currently available for the driver */
 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 .reset = amdgpu_dm_crtc_reset_state,
@@ -307,6 +372,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 #if defined(CONFIG_DEBUG_FS)
 .late_register = amdgpu_dm_crtc_late_register,
 #endif
+#ifdef AMD_PRIVATE_COLOR
+ .atomic_set_property = amdgpu_dm_atomic_crtc_set_property,
+ .atomic_get_property = amdgpu_dm_atomic_crtc_get_property,
+#endif
 };
 
 static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc)
@@ -482,6 +551,9 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 
 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
 
+#ifdef AMD_PRIVATE_COLOR
+ dm_crtc_additional_color_mgmt(&acrtc->base);
+#endif
 return 0;
 
 fail:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 417c9cb47b..85fc618130 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -2971,6 +2971,85 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val)
 return 0;
 }
 
+static int dmub_trace_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+ struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
+ enum dmub_gpint_command cmd;
+ u64 mask = 0xffff;
+ u8 shift = 0;
+ u32 res;
+ int i;
+
+ if (!srv->fw_version)
+ return -EINVAL;
+
+ for (i = 0; i < 4; i++) {
+ res = (val & mask) >> shift;
+
+ switch (i) {
+ case 0:
+ cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0;
+ break;
+ case 1:
+ cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1;
+ break;
+ case 2:
+ cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2;
+ break;
+ case 3:
+ cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3;
+ break;
+ }
+
+ if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT))
+ return -EIO;
+
+ usleep_range(100, 1000);
+
+ mask <<= 16;
+ shift += 16;
+ }
+
+ return 0;
+}
+
+static int dmub_trace_mask_show(void *data, u64 *val)
+{
+ enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0;
+ struct
amdgpu_device *adev = data; + struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; + u8 shift = 0; + u64 raw = 0; + u64 res = 0; + int i = 0; + + if (!srv->fw_version) + return -EINVAL; + + while (i < 4) { + uint32_t response; + + if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return -EIO; + + raw = response; + usleep_range(100, 1000); + + cmd++; + res |= (raw << shift); + shift += 16; + i++; + } + + *val = res; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(dmub_trace_mask_fops, dmub_trace_mask_show, + dmub_trace_mask_set, "0x%llx\n"); + /* * Set dmcub trace event IRQ enable or disable. * Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en @@ -3884,6 +3963,9 @@ void dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root, adev, &force_timing_sync_ops); + debugfs_create_file_unsafe("amdgpu_dm_dmub_trace_mask", 0644, root, + adev, &dmub_trace_mask_fops); + debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root, adev, &dmcub_trace_event_state_fops); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 7a09a72e18..c27063305a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -31,6 +31,7 @@ #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <drm/drm_edid.h> +#include <drm/drm_fixed.h> #include "dm_services.h" #include "amdgpu.h" @@ -218,7 +219,7 @@ static void dm_helpers_construct_old_payload( struct drm_dp_mst_atomic_payload *old_payload) { struct drm_dp_mst_atomic_payload *pos; - int pbn_per_slot = mst_state->pbn_div; + int pbn_per_slot = dfixed_trunc(mst_state->pbn_div); u8 next_payload_vc_start = mgr->next_start_slot; u8 payload_vc_start = new_payload->vc_start_slot; u8 allocated_time_slots; @@ -341,15 +342,14 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( return ACT_SUCCESS; } -bool dm_helpers_dp_mst_send_payload_allocation( +void dm_helpers_dp_mst_send_payload_allocation( struct dc_context *ctx, - const struct dc_stream_state *stream, - bool enable) + const struct dc_stream_state *stream) { struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_atomic_payload *new_payload, old_payload; + struct drm_dp_mst_atomic_payload *new_payload; enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; int ret = 0; @@ -357,25 +357,13 @@ bool dm_helpers_dp_mst_send_payload_allocation( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_root) - return false; + return; mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); - new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); - if (!enable) { - set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; - clr_flag = MST_ALLOCATE_NEW_PAYLOAD; - } - - if (enable) { - ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); - } else { - dm_helpers_construct_old_payload(mst_mgr, mst_state, - new_payload, &old_payload); - drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); - } + ret = drm_dp_add_payload_part2(mst_mgr, 
mst_state->base.state, new_payload); if (ret) { amdgpu_dm_set_mst_status(&aconnector->mst_status, @@ -386,10 +374,36 @@ bool dm_helpers_dp_mst_send_payload_allocation( amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); } - - return true; } +void dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + struct dc_context *ctx, + const struct dc_stream_state *stream) +{ + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_atomic_payload *new_payload, old_payload; + enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->mst_root) + return; + + mst_mgr = &aconnector->mst_root->mst_mgr; + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); + new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); + dm_helpers_construct_old_payload(mst_mgr, mst_state, + new_payload, &old_payload); + + drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); + + amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true); + amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); + } + void dm_dtn_log_begin(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index d595030c33..3390f0d842 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -897,10 +897,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *amdgpu_dm_connector; + const struct dc_link *dc_link; - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + + dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, @@ -933,9 +938,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + struct amdgpu_dm_connector *amdgpu_dm_connector; + const struct dc_link *dc_link; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 602a2ab98a..941e96f100 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -27,6 +27,8 @@ #include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fixed.h> +#include <drm/drm_edid.h> #include 
"dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" @@ -44,7 +46,7 @@ #include "amdgpu_dm_debugfs.h" #endif -#include "dc/dcn20/dcn20_resource.h" +#include "dc/resource/dcn20/dcn20_resource.h" #define PEAK_FACTOR_X1000 1006 @@ -424,8 +426,7 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector, { struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, connector); - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); return &adev->dm.mst_encoders[acrtc->crtc_id].base; @@ -941,10 +942,10 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div)); fair_pbn_alloc = - (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; + (63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div); if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; @@ -1604,9 +1605,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( struct dc_link_settings cur_link_settings; unsigned int end_to_end_bw_in_kbps = 0; unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; - unsigned int max_compressed_bw_in_kbps = 0; struct dc_dsc_bw_range bw_range = {0}; - uint16_t full_pbn = aconnector->mst_output_port->full_pbn; + struct dc_dsc_config_options dsc_options = {0}; /* * Consider the case with the depth of the mst topology tree is equal or less than 2 @@ -1622,30 +1622,39 @@ enum dc_status dm_dp_mst_is_port_support_mode( (aconnector->mst_output_port->passthrough_aux || aconnector->dsc_aux == &aconnector->mst_output_port->aux)) { cur_link_settings = stream->link->verified_link_cap; + upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); + down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); - upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, - &cur_link_settings); - down_link_bw_in_kbps = kbps_from_pbn(full_pbn); - - /* pick the bottleneck */ - end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, - down_link_bw_in_kbps); - - /* - * use the maximum dsc compression bandwidth as the required - * bandwidth for the mode - */ - max_compressed_bw_in_kbps = bw_range.min_kbps; + /* pick the end to end bw bottleneck */ + end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps); - if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) { - DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n"); + if (end_to_end_bw_in_kbps < bw_range.min_kbps) { + DRM_DEBUG_DRIVER("maximum dsc compression cannot fit into end-to-end bandwidth\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } + + if (end_to_end_bw_in_kbps < bw_range.stream_kbps) { + dc_dsc_get_default_config_option(stream->link->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16; + if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0], + &stream->sink->dsc_caps.dsc_dec_caps, + &dsc_options, + end_to_end_bw_in_kbps, + &stream->timing, + dc_link_get_highest_encoding_format(stream->link), + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("end-to-end bandwidth require 
dsc and a dsc config was found\n");
+ } else {
+ DRM_DEBUG_DRIVER("end-to-end bandwidth requires dsc but no dsc config was found\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ }
 } else {
 /* check if mode could be supported within full_pbn */
 bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
 pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
- if (pbn > full_pbn)
+ if (pbn > aconnector->mst_output_port->full_pbn)
 return DC_FAIL_BANDWIDTH_VALIDATE;
 }
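For a concrete walk-through of the new fallback (illustrative numbers, not taken from the patch): a 4-lane HBR3 upstream link gives upper_link_bw_in_kbps of roughly 25,920,000, and a branch device whose full_pbn corresponds to a 4-lane HBR2 link gives down_link_bw_in_kbps of roughly 17,280,000, so end_to_end_bw_in_kbps = min(25,920,000, 17,280,000) = 17,280,000. If the mode's bw_range comes back as min_kbps = 8,000,000 (maximally compressed) and stream_kbps = 24,000,000 (uncompressed), the first check passes (17,280,000 >= 8,000,000), the second check triggers the new DSC path (17,280,000 < 24,000,000), and the mode is rejected only if dc_dsc_compute_config() cannot produce a configuration that fits within 17,280,000 kbps.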
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 116121e647..8a4c40b4c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1337,8 +1337,14 @@ static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
 WARN_ON(amdgpu_state == NULL);
 
- if (amdgpu_state)
- __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
+ if (!amdgpu_state)
+ return;
+
+ __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
+ amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT;
+ amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
 }
 
 static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
@@ -1357,6 +1363,27 @@ static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct
 dc_plane_state_retain(dm_plane_state->dc_state);
 }
 
+ if (old_dm_plane_state->degamma_lut)
+ dm_plane_state->degamma_lut =
+ drm_property_blob_get(old_dm_plane_state->degamma_lut);
+ if (old_dm_plane_state->ctm)
+ dm_plane_state->ctm =
+ drm_property_blob_get(old_dm_plane_state->ctm);
+ if (old_dm_plane_state->shaper_lut)
+ dm_plane_state->shaper_lut =
+ drm_property_blob_get(old_dm_plane_state->shaper_lut);
+ if (old_dm_plane_state->lut3d)
+ dm_plane_state->lut3d =
+ drm_property_blob_get(old_dm_plane_state->lut3d);
+ if (old_dm_plane_state->blend_lut)
+ dm_plane_state->blend_lut =
+ drm_property_blob_get(old_dm_plane_state->blend_lut);
+
+ dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf;
+ dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult;
+ dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf;
+ dm_plane_state->blend_tf = old_dm_plane_state->blend_tf;
+
 return &dm_plane_state->base;
 }
 
@@ -1424,12 +1451,206 @@ static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
 {
 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
 
+ if (dm_plane_state->degamma_lut)
+ drm_property_blob_put(dm_plane_state->degamma_lut);
+ if (dm_plane_state->ctm)
+ drm_property_blob_put(dm_plane_state->ctm);
+ if (dm_plane_state->lut3d)
+ drm_property_blob_put(dm_plane_state->lut3d);
+ if (dm_plane_state->shaper_lut)
+ drm_property_blob_put(dm_plane_state->shaper_lut);
+ if (dm_plane_state->blend_lut)
+ drm_property_blob_put(dm_plane_state->blend_lut);
+
 if (dm_plane_state->dc_state)
 dc_plane_state_release(dm_plane_state->dc_state);
 
 drm_atomic_helper_plane_destroy_state(plane, state);
 }
 
+#ifdef AMD_PRIVATE_COLOR
+static void
+dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane)
+{
+ struct amdgpu_mode_info mode_info = dm->adev->mode_info;
+ struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp;
+
+ /* Check HW color pipeline capabilities on the DPP block (pre-blending)
+ * before exposing related properties.
+ */
+ if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) {
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_degamma_lut_property,
+ 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_degamma_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_degamma_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ }
+ /* HDR MULT is always available */
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_hdr_mult_property,
+ AMDGPU_HDR_MULT_DEFAULT);
+
+ /* Only enable plane CTM if both DPP and MPC gamut remap are available. */
+ if (dm->dc->caps.color.mpc.gamut_remap)
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_ctm_property, 0);
+
+ if (dpp_color_caps.hw_3d_lut) {
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_lut_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_lut3d_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_lut3d_size_property,
+ MAX_COLOR_3DLUT_SIZE);
+ }
+
+ if (dpp_color_caps.ogam_ram) {
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_lut_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ }
+}
+
+static int
+dm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ bool replaced = false;
+ int ret;
+
+ if (property == adev->mode_info.plane_degamma_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->degamma_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_degamma_tf_property) {
+ if (dm_plane_state->degamma_tf != val) {
+ dm_plane_state->degamma_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_hdr_mult_property) {
+ if (dm_plane_state->hdr_mult != val) {
+ dm_plane_state->hdr_mult = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_ctm_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->ctm,
+ val,
+ sizeof(struct drm_color_ctm_3x4), -1,
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_shaper_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->shaper_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_shaper_tf_property) {
+ if (dm_plane_state->shaper_tf != val) {
+ dm_plane_state->shaper_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_lut3d_property) {
+ ret =
drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->lut3d,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_blend_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->blend_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_blend_tf_property) {
+ if (dm_plane_state->blend_tf != val) {
+ dm_plane_state->blend_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dm_atomic_plane_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+
+ if (property == adev->mode_info.plane_degamma_lut_property) {
+ *val = (dm_plane_state->degamma_lut) ?
+ dm_plane_state->degamma_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_degamma_tf_property) {
+ *val = dm_plane_state->degamma_tf;
+ } else if (property == adev->mode_info.plane_hdr_mult_property) {
+ *val = dm_plane_state->hdr_mult;
+ } else if (property == adev->mode_info.plane_ctm_property) {
+ *val = (dm_plane_state->ctm) ?
+ dm_plane_state->ctm->base.id : 0;
+ } else if (property == adev->mode_info.plane_shaper_lut_property) {
+ *val = (dm_plane_state->shaper_lut) ?
+ dm_plane_state->shaper_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_shaper_tf_property) {
+ *val = dm_plane_state->shaper_tf;
+ } else if (property == adev->mode_info.plane_lut3d_property) {
+ *val = (dm_plane_state->lut3d) ?
+ dm_plane_state->lut3d->base.id : 0;
+ } else if (property == adev->mode_info.plane_blend_lut_property) {
+ *val = (dm_plane_state->blend_lut) ?
+ dm_plane_state->blend_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_blend_tf_property) {
+ *val = dm_plane_state->blend_tf;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
 static const struct drm_plane_funcs dm_plane_funcs = {
 .update_plane = drm_atomic_helper_update_plane,
 .disable_plane = drm_atomic_helper_disable_plane,
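Because these properties are driver-private (compiled in only with AMD_PRIVATE_COLOR), userspace reaches them through the generic DRM atomic property interface rather than a dedicated ioctl. A hypothetical libdrm sketch, assuming the caller already enabled DRM_CLIENT_CAP_ATOMIC via drmSetClientCap() and looked up the AMD_PLANE_DEGAMMA_TF property ID through drmModeObjectGetProperties():

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical userspace helper: sets the driver-private
 * AMD_PLANE_DEGAMMA_TF property on a plane via an atomic commit. */
static int set_plane_degamma_tf(int fd, uint32_t plane_id,
				uint32_t prop_id, uint64_t tf_enum_value)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* tf_enum_value is one of the enum entries exposed by the property,
	 * e.g. the value named "sRGB EOTF" above. */
	drmModeAtomicAddProperty(req, plane_id, prop_id, tf_enum_value);

	/* Validate first, then commit for real. */
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
	if (!ret)
		ret = drmModeAtomicCommit(fd, req, 0, NULL);

	drmModeAtomicFree(req);
	return ret;
}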
@@ -1438,6 +1659,10 @@ static const struct drm_plane_funcs dm_plane_funcs = {
 .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
 .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
 .format_mod_supported = amdgpu_dm_plane_format_mod_supported,
+#ifdef AMD_PRIVATE_COLOR
+ .atomic_set_property = dm_atomic_plane_set_property,
+ .atomic_get_property = dm_atomic_plane_get_property,
+#endif
 };
 
 int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
@@ -1517,6 +1742,9 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 
 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
 
+#ifdef AMD_PRIVATE_COLOR
+ dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
+#endif
 /* Create (reset) the plane state */
 if (plane->funcs->reset)
 plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 08ce3bb8f6..286ecd28cc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -51,6 +51,9 @@ static bool link_supports_psrsu(struct dc_link *link)
 !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
 return false;
 
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
+ return false;
+
 return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
 }
 
@@ -138,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
- * Return: true if success
 */
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 {
 struct dc_link *link = stream->link;
 unsigned int vsync_rate_hz = 0;
@@ -187,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
 power_opt |= psr_power_opt_z10_static_screen;
 
- return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+ dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+
+ if (link->ctx->dc->caps.ips_support)
+ dc_allow_idle_optimizations(link->ctx->dc, true);
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 6806b3c9c8..1fdfd183c0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -32,7 +32,7 @@
 #define AMDGPU_DM_PSR_ENTRY_DELAY 5
 
 void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
 bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
new file mode 100644
index 0000000000..08c494a7a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices,
Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services_types.h" + +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_wb.h" +#include "amdgpu_display.h" +#include "dc.h" + +#include <drm/drm_edid.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_modeset_helper_vtables.h> + +static const u32 amdgpu_dm_wb_formats[] = { + DRM_FORMAT_XRGB2101010, +}; + +static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_framebuffer *fb; + const struct drm_display_mode *mode = &crtc_state->mode; + bool found = false; + uint8_t i; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + for (i = 0; i < sizeof(amdgpu_dm_wb_formats) / sizeof(u32); i++) { + if (fb->format->format == amdgpu_dm_wb_formats[i]) + found = true; + } + + if (!found) { + DRM_DEBUG_KMS("Invalid pixel format %p4cc\n", + &fb->format->format); + return -EINVAL; + } + + return 0; +} + + +static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector) +{ + /* Maximum resolution supported by DWB */ + return drm_add_modes_noedid(connector, 3840, 2160); +} + +static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector, + struct drm_writeback_job *job) +{ + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_device *adev; + struct amdgpu_bo *rbo; + uint32_t domain; + int r; + + if (!job->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + afb = to_amdgpu_framebuffer(job->fb); + obj = job->fb->obj[0]; + rbo = gem_to_amdgpu_bo(obj); + adev = amdgpu_ttm_adev(rbo->tbo.bdev); + + r = amdgpu_bo_reserve(rbo, true); + if (r) { + dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + return r; + } + + r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); + if (r) { + dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); + goto error_unlock; + } + + domain = amdgpu_display_supported_domains(adev, rbo->flags); + + r = amdgpu_bo_pin(rbo, domain); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); + goto error_unlock; + } + + r = amdgpu_ttm_alloc_gart(&rbo->tbo); + if 
(unlikely(r != 0)) { + DRM_ERROR("%p bind failed\n", rbo); + goto error_unpin; + } + + amdgpu_bo_unreserve(rbo); + + afb->address = amdgpu_bo_gpu_offset(rbo); + + amdgpu_bo_ref(rbo); + + return 0; + +error_unpin: + amdgpu_bo_unpin(rbo); + +error_unlock: + amdgpu_bo_unreserve(rbo); + return r; +} + +static void amdgpu_dm_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct amdgpu_bo *rbo; + int r; + + if (!job->fb) + return; + + rbo = gem_to_amdgpu_bo(job->fb->obj[0]); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } + + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + amdgpu_bo_unref(&rbo); +} + +static const struct drm_encoder_helper_funcs amdgpu_dm_wb_encoder_helper_funcs = { + .atomic_check = amdgpu_dm_wb_encoder_atomic_check, +}; + +static const struct drm_connector_funcs amdgpu_dm_wb_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = amdgpu_dm_connector_funcs_reset, + .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs amdgpu_dm_wb_conn_helper_funcs = { + .get_modes = amdgpu_dm_wb_connector_get_modes, + .prepare_writeback_job = amdgpu_dm_wb_prepare_job, + .cleanup_writeback_job = amdgpu_dm_wb_cleanup_job, +}; + +int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_wb_connector *wbcon, + uint32_t link_index) +{ + struct dc *dc = dm->dc; + struct dc_link *link = dc_get_link_at_index(dc, link_index); + int res = 0; + + wbcon->link = link; + + drm_connector_helper_add(&wbcon->base.base, &amdgpu_dm_wb_conn_helper_funcs); + + res = drm_writeback_connector_init(&dm->adev->ddev, &wbcon->base, + &amdgpu_dm_wb_connector_funcs, + &amdgpu_dm_wb_encoder_helper_funcs, + amdgpu_dm_wb_formats, + ARRAY_SIZE(amdgpu_dm_wb_formats), + amdgpu_dm_get_encoder_crtc_mask(dm->adev)); + + if (res) + return res; + /* + * Some of the properties below require access to state, like bpc. + * Allocate some default initial connector state with our reset helper. + */ + if (wbcon->base.base.funcs->reset) + wbcon->base.base.funcs->reset(&wbcon->base.base); + + return 0; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h new file mode 100644 index 0000000000..13d31c857d --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_WB_H__ +#define __AMDGPU_DM_WB_H__ + +#include <drm/drm_writeback.h> + +int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_wb_connector *dm_wbcon, + uint32_t link_index); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 3a169b78e7..7991ae468f 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -22,7 +22,7 @@ # # Makefile for Display Core (dc) component. -DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc +DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc ifdef CONFIG_DRM_AMD_DC_FP @@ -34,12 +34,8 @@ DC_LIBS += dcn21 DC_LIBS += dcn201 DC_LIBS += dcn30 DC_LIBS += dcn301 -DC_LIBS += dcn302 -DC_LIBS += dcn303 DC_LIBS += dcn31 DC_LIBS += dcn314 -DC_LIBS += dcn315 -DC_LIBS += dcn316 DC_LIBS += dcn32 DC_LIBS += dcn321 DC_LIBS += dcn35 @@ -51,7 +47,6 @@ DC_LIBS += dce120 DC_LIBS += dce112 DC_LIBS += dce110 -DC_LIBS += dce100 DC_LIBS += dce80 ifdef CONFIG_DRM_AMD_DC_SI @@ -65,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o +dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o DISPLAY_CORE += dc_vm_helper.o diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index e295a839ab..1090d23508 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -103,7 +103,8 @@ void convert_float_matrix( static uint32_t find_gcd(uint32_t a, uint32_t b) { - uint32_t remainder = 0; + uint32_t remainder; + while (b != 0) { remainder = a % b; a = b; diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index f2dfa96f9e..39530b2ea4 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -94,7 +94,7 @@ static void calculate_bandwidth( const uint32_t s_high = 7; const uint32_t dmif_chunk_buff_margin = 1; - uint32_t max_chunks_fbc_mode; + uint32_t max_chunks_fbc_mode = 0; int32_t num_cursor_lines; int32_t i, j, k; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index bc7a375f43..05f392501c 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -2223,22 +2223,22 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( switch (bp->object_info_tbl.revision.minor) { case 4: - default: - object = get_bios_object(bp, object_id); - - if (!object) - return BP_RESULT_BADINPUT; - - record = get_disp_connector_caps_record(bp, object); - if (!record) - return BP_RESULT_NORECORD; - - info->INTERNAL_DISPLAY = - (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0; - info->INTERNAL_DISPLAY_BL = - (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 
1 : 0; - break; - case 5: + default: + object = get_bios_object(bp, object_id); + + if (!object) + return BP_RESULT_BADINPUT; + + record = get_disp_connector_caps_record(bp, object); + if (!record) + return BP_RESULT_NORECORD; + + info->INTERNAL_DISPLAY = + (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0; + info->INTERNAL_DISPLAY_BL = + (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0; + break; + case 5: object_path_v3 = get_bios_object_from_path_v3(bp, object_id); if (!object_path_v3) @@ -2400,7 +2400,6 @@ static enum bp_result get_vram_info_v30( return result; } - /* * get_integrated_info_v11 * @@ -3336,27 +3335,28 @@ static enum bp_result get_bracket_layout_record( DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n"); return BP_RESULT_BADINPUT; } + tbl = &bp->object_info_tbl; v1_4 = tbl->v1_4; v1_5 = tbl->v1_5; result = BP_RESULT_NORECORD; switch (bp->object_info_tbl.revision.minor) { - case 4: - default: - for (i = 0; i < v1_4->number_of_path; ++i) { - if (bracket_layout_id == - v1_4->display_path[i].display_objid) { - result = update_slot_layout_info(dcb, i, slot_layout_info); - break; - } + case 4: + default: + for (i = 0; i < v1_4->number_of_path; ++i) { + if (bracket_layout_id == v1_4->display_path[i].display_objid) { + result = update_slot_layout_info(dcb, i, slot_layout_info); + break; } - break; - case 5: - for (i = 0; i < v1_5->number_of_path; ++i) - result = update_slot_layout_info_v2(dcb, i, slot_layout_info); - break; + } + break; + case 5: + for (i = 0; i < v1_5->number_of_path; ++i) + result = update_slot_layout_info_v2(dcb, i, slot_layout_info); + break; } + return result; } @@ -3365,9 +3365,7 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; - static enum bp_result record_result; unsigned int max_slots; @@ -3377,7 +3375,6 @@ static enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { @@ -3558,7 +3555,6 @@ static const struct dc_vbios_funcs vbios_funcs = { .bios_parser_destroy = firmware_parser_destroy, .get_board_layout_info = bios_get_board_layout_info, - /* TODO: use this fn in hw init?*/ .pack_data_tables = bios_parser_pack_data_tables, .get_atom_dc_golden_table = bios_get_atom_dc_golden_table, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 3e73c4e59d..28a2a837d2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -29,6 +29,7 @@ #include "dc_types.h" #include "dccg.h" #include "clk_mgr_internal.h" +#include "dc_state_priv.h" #include "link.h" #include "dce100/dce_clk_mgr.h" @@ -63,7 +64,7 @@ int clk_mgr_helper_get_active_display_cnt( /* Don't count SubVP phantom pipes as part of active * display count */ - if (stream->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; /* @@ -368,7 +369,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p } break; -#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */ +#endif /* CONFIG_DRM_AMD_DC_FP */ default: ASSERT(0); /* Unknown Asic */ break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index a5489fe687..aa9fd1dc55 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta int i; for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) { + if (i >= VG_NUM_DCFCLK_DPM_LEVELS) + break; if (clock_table->SocVoltage[i] == voltage) return clock_table->DcfClocks[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 12f3e8aa46..6ad4f4efec 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa( return display_count; } -static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) +static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = safe_to_lower + ? &context->res_ctx.pipe_ctx[i] + : &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL || - dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc)) { if (disable) { - pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); } else pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); @@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - dcn316_disable_otg_wa(clk_mgr_base, context, true); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - dcn316_disable_otg_wa(clk_mgr_base, context, false); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 289918ea72..bbdbc78161 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -25,7 +25,6 @@ #include "dccg.h" #include "clk_mgr_internal.h" - #include "dcn32/dcn32_clk_mgr_smu_msg.h" #include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" @@ -34,7 +33,7 @@ #include "core_types.h" #include "dm_helpers.h" #include "link.h" - +#include "dc_state_priv.h" #include "atomfirmware.h" #include "smu13_driver_if.h" @@ -472,20 +471,56 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) return 0; } -static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr) +static bool dcn32_check_native_scaling(struct pipe_ctx *pipe) { - unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK - unsigned int dppclk_khz_reg = 
REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK - unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK - unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK - unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK - unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK + bool is_native_scaling = false; + int width = pipe->plane_state->src_rect.width; + int height = pipe->plane_state->src_rect.height; + + if (pipe->stream->timing.h_addressable == width && + pipe->stream->timing.v_addressable == height && + pipe->plane_state->dst_rect.width == width && + pipe->plane_state->dst_rect.height == height) + is_native_scaling = true; + + return is_native_scaling; +} + +static void dcn32_auto_dpm_test_log( + struct dc_clocks *new_clocks, + struct clk_mgr_internal *clk_mgr, + struct dc_state *context) +{ + unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg, + fclk_khz_reg, mall_ss_size_bytes; + int dramclk_khz_override, fclk_khz_override, num_fclk_levels; + + struct pipe_ctx *pipe_ctx_list[MAX_PIPES]; + int active_pipe_count = 0; + + for (int i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { + pipe_ctx_list[active_pipe_count] = pipe_ctx; + active_pipe_count++; + } + } + + mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes; + + dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK + dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK + dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK + dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK + dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK + fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK // Overrides for these clocks in case there is no p_state change support - int dramclk_khz_override = new_clocks->dramclk_khz; - int fclk_khz_override = new_clocks->fclk_khz; + dramclk_khz_override = new_clocks->dramclk_khz; + fclk_khz_override = new_clocks->fclk_khz; - int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; + num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; if (!new_clocks->p_state_change_support) { dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000; @@ -502,16 +537,49 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr // // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n" //////////////////////////////////////////////////////////////////////////// - if (new_clocks && + if (new_clocks && active_pipe_count > 0 && new_clocks->dramclk_khz > 0 && new_clocks->fclk_khz > 0 && new_clocks->dcfclk_khz > 0 && new_clocks->dppclk_khz > 0) { + uint32_t pix_clk_list[MAX_PIPES] = {0}; + int p_state_list[MAX_PIPES] = {0}; + int disp_src_width_list[MAX_PIPES] = {0}; + int disp_src_height_list[MAX_PIPES] = {0}; + uint64_t disp_src_refresh_list[MAX_PIPES] = {0}; + bool is_scaled_list[MAX_PIPES] = {0}; + + for (int i = 0; i < active_pipe_count; i++) { + struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i]; + uint64_t refresh_rate; + + pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz; + p_state_list[i] = curr_pipe_ctx->p_state_type; + + refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 + + curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - 
(uint64_t)1); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total); + disp_src_refresh_list[i] = refresh_rate; + + if (curr_pipe_ctx->plane_state) { + is_scaled_list[i] = !(dcn32_check_native_scaling(curr_pipe_ctx)); + disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width; + disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height; + } + } + DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - " "dcfclk:%d - dppclk:%d - dispclk_hw:%d - " "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - " - "dtbclk_hw:%d - fclk_hw:%d\n", + "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - " + "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - " + "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - " + "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - " + "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - " + "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - " + "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n", dramclk_khz_override, fclk_khz_override, new_clocks->dcfclk_khz, @@ -521,7 +589,14 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg, - fclk_khz_reg); + fclk_khz_reg, + pix_clk_list[0], pix_clk_list[1], pix_clk_list[2], pix_clk_list[3], + mall_ss_size_bytes, + p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3], + disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0], + disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1], + disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2], + disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]); } } @@ -694,6 +769,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* DCCG requires KHz precision for DTBCLK */ clk_mgr_base->clks.ref_dtbclk_khz = dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz)); + dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz); } @@ -722,7 +798,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.dispclk_khz / 1000 / 7); if (dc->config.enable_auto_dpm_test_logs) { - dcn32_auto_dpm_test_log(new_clocks, clk_mgr); + dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 353d5fb9e3..9cbab880c6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -50,6 +50,7 @@ #include "dc_dmub_srv.h" #include "link.h" #include "logger_types.h" + #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger @@ -80,12 +81,12 @@ static int dcn35_get_active_display_cnt_wa( struct dc *dc, - struct dc_state *context) + struct dc_state *context, + int *all_active_disps) { - int i, display_count; + int i, display_count = 0; bool tmds_present = false; - display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; @@ -103,7 +104,8 @@ static int dcn35_get_active_display_cnt_wa(
link->link_enc->funcs->is_dig_enabled(link->link_enc)) display_count++; } - + if (all_active_disps != NULL) + *all_active_disps = display_count; /* WA for hang on HDMI after display off back on*/ if (display_count == 0 && tmds_present) display_count = 1; @@ -216,15 +218,16 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; + int all_active_disps = 0; if (dc->work_arounds.skip_clock_update) return; - /* DTBCLK is fixed, so set a default if unspecified. */ + display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps); if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz) new_clocks->ref_dtbclk_khz = 600000; @@ -246,7 +249,6 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { - display_count = dcn35_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ if (display_count == 0) clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -382,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base) dcn35_smu_enable_pme_wa(clk_mgr); } -void dcn35_init_clocks(struct clk_mgr *clk_mgr) -{ - uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; - - memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); - - // Assumption is that boot state always supports pstate - clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk - clk_mgr->clks.p_state_change_support = true; - clk_mgr->clks.prev_p_state_change_support = true; - clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; - clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; -} bool dcn35_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b) @@ -416,11 +405,22 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a, } static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, - struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) + struct clk_mgr_dcn35 *clk_mgr) { - } +void dcn35_init_clocks(struct clk_mgr *clk_mgr) +{ + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; +} static struct clk_bw_params dcn35_bw_params = { .vram_type = Ddr4MemType, .num_channels = 1, @@ -436,32 +436,32 @@ static struct wm_table ddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + 
.sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, } @@ -473,32 +473,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, } @@ -825,7 +825,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) struct dc_state *context = dc->current_state; if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { - display_count = dcn35_get_active_display_cnt_wa(dc, context); + display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL); /* if we can go lower, go lower */ if (display_count == 0) clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -992,7 +992,6 @@ void dcn35_clk_mgr_construct( struct dccg *dccg) { struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 }; - struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn35_funcs; @@ -1045,7 +1044,7 @@ void dcn35_clk_mgr_construct( dcn35_bw_params.wm_table = ddr5_wm_table; } /* Saved clocks configured at boot for debug purposes */ - dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr); clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base); clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index b6b8c3ca15..6d4a1ffab5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -116,6 +116,9 @@ static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un msleep(delay_us/1000); else if (delay_us > 0) udelay(delay_us); + + if (clk_mgr->base.ctx->dc->debug.disable_timeout) + max_retries++; } while (max_retries--); return res_val; @@ -276,7 +279,7 @@ void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, u clk_mgr, VBIOSSMC_MSG_SetDisplayIdleOptimizations, idle_info); - smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info); + smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info); } void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) @@ -295,7 +298,7 @@ void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool e clk_mgr, 
VBIOSSMC_MSG_SetDisplayIdleOptimizations, idle_info.data); - smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0); + smu_print("%s: smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0); } void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) @@ -307,6 +310,7 @@ void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) clk_mgr, VBIOSSMC_MSG_UpdatePmeRestore, 0); + smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__); } void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high) @@ -347,7 +351,7 @@ void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support) { - unsigned int msg_id, param; + unsigned int msg_id, param, retv; if (!clk_mgr->smu_present) return; @@ -357,27 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst case DCN_ZSTATE_SUPPORT_ALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 9) | (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_DISALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = 0; + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = DISALLOW, param = %d\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param); break; default: //DCN_ZSTATE_SUPPORT_UNKNOWN @@ -387,11 +396,11 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst } - dcn35_smu_send_msg_with_param( + retv = dcn35_smu_send_msg_with_param( clk_mgr, msg_id, param); - smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param); + smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv); } int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) @@ -405,7 +414,7 @@ int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_GetDprefclkFreq, 0); - smu_print("dcn35_smu_get_DPREF clk = %d mhz\n", dprefclk); + smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk); return dprefclk * 1000; } @@ -420,7 +429,7 @@ int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_GetDtbclkFreq, 0); - smu_print("dcn35_smu_get_dtbclk = %d mhz\n", dtbclk); + smu_print("%s: get_dtbclk = %d mhz\n", __func__, dtbclk); return dtbclk * 1000; } /* Arg = 1: Turn DTB CLK on; 0: Turn DTB CLK off. When it is on, it is 600 MHz */ @@ -433,7 +442,7 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable) clk_mgr, VBIOSSMC_MSG_SetDtbClk, enable); - smu_print("dcn35_smu_set_dtbclk = %d \n", enable ? 1 : 0); + smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ?
1 : 0); } void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) @@ -442,30 +451,45 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl clk_mgr, VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, enable); + smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0); } int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) { - return dcn35_smu_send_msg_with_param( + int retv; + + retv = dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_DispPsrExit, 0); + smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv); + return retv; } int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) { - return dcn35_smu_send_msg_with_param( + int retv; + + retv = dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_QueryIPS2Support, 0); + + //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv); + return retv; } void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) { REG_WRITE(MP1_SMN_C2PMSG_71, param); + //smu_print("%s: write_ips_scratch = %x\n", __func__, param); } uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) { - return REG_READ(MP1_SMN_C2PMSG_71); + uint32_t retv; + + retv = REG_READ(MP1_SMN_C2PMSG_71); + //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv); + return retv; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index b51208f44c..3c3d613c5f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -34,6 +34,8 @@ #include "dce/dce_hwseq.h" #include "resource.h" +#include "dc_state.h" +#include "dc_state_priv.h" #include "gpio_service_interface.h" #include "clk_mgr.h" @@ -409,9 +411,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, * avoid conflicting with firmware updates. 
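 *
 * A minimal caller-side sketch (illustrative only; the vmin/vmax values
 * here are placeholders): repeated calls with an unchanged range are
 * cheap, because the memcmp early-return below skips hardware access:
 *
 *   struct dc_crtc_timing_adjust adjust = stream->adjust;
 *
 *   adjust.v_total_min = vmin;
 *   adjust.v_total_max = vmax;
 *   if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *       DC_LOG_WARNING("DRR adjust rejected\n");
 *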
*/ if (dc->ctx->dce_version > DCE_VERSION_MAX) - if (dc->optimized_required || dc->wm_optimized_required) + if (dc->optimized_required) return false; + if (!memcmp(&stream->adjust, adjust, sizeof(*adjust))) + return true; + + dc_exit_ips_for_hw_access(dc); + stream->adjust.v_total_max = adjust->v_total_max; stream->adjust.v_total_mid = adjust->v_total_mid; stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; @@ -452,6 +459,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, int i = 0; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -482,6 +491,8 @@ bool dc_stream_get_crtc_position(struct dc *dc, bool ret = false; struct crtc_position position; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -601,6 +612,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, if (pipe == NULL) return false; + dc_exit_ips_for_hw_access(dc); + /* By default, capture the full frame */ param.windowa_x_start = 0; param.windowa_y_start = 0; @@ -660,6 +673,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, struct pipe_ctx *pipe; struct timing_generator *tg; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream) @@ -684,6 +699,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, int i; struct pipe_ctx *pipe_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { @@ -719,6 +736,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, if (option > DITHER_OPTION_MAX) return; + dc_exit_ips_for_hw_access(stream->ctx->dc); + stream->dither_option = option; memset(¶ms, 0, sizeof(params)); @@ -743,6 +762,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre bool ret = false; struct pipe_ctx *pipes; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { pipes = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -760,6 +781,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) bool ret = false; struct pipe_ctx *pipes; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { @@ -786,6 +809,8 @@ void dc_stream_set_static_screen_params(struct dc *dc, struct pipe_ctx *pipes_affected[MAX_PIPES]; int num_pipes_affected = 0; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < num_streams; i++) { struct dc_stream_state *stream = streams[i]; @@ -808,7 +833,7 @@ static void dc_destruct(struct dc *dc) link_enc_cfg_init(dc, dc->current_state); if (dc->current_state) { - dc_release_state(dc->current_state); + dc_state_release(dc->current_state); dc->current_state = NULL; } @@ -1020,29 +1045,27 @@ static bool dc_construct(struct dc *dc, } #endif + if (!create_links(dc, init_params->num_virtual_links)) + goto fail; + + /* Create additional DIG link encoder objects if fewer than the platform + * supports were created during link construction. 
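+ * (One case where this matters, as a sketch rather than an exhaustive
+ * list: ASICs that assign DIG encoders to links dynamically, e.g. USB4
+ * DPIA, can use more encoders than the links constructed above, and the
+ * spare encoder objects created here are what link_enc_cfg assigns later.)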
+ */ + if (!create_link_encoders(dc)) + goto fail; + /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because * on creation it copies the contents of dc->dml */ - dc->current_state = dc_create_state(dc); + dc->current_state = dc_state_create(dc); if (!dc->current_state) { dm_error("%s: failed to create validate ctx\n", __func__); goto fail; } - if (!create_links(dc, init_params->num_virtual_links)) - goto fail; - - /* Create additional DIG link encoder objects if fewer than the platform - * supports were created during link construction. - */ - if (!create_link_encoders(dc)) - goto fail; - - dc_resource_state_construct(dc, dc->current_state); - return true; fail: @@ -1085,7 +1108,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc, } } -static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { if (dc->ctx->dce_version >= DCN_VERSION_1_0) { memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); @@ -1105,9 +1128,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) - get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) - get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); } } } @@ -1115,7 +1138,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; - struct dc_state *dangling_context = dc_create_state(dc); + struct dc_state *dangling_context = dc_state_create_current_copy(dc); struct dc_state *current_ctx; struct pipe_ctx *pipe; struct timing_generator *tg; @@ -1123,8 +1146,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) if (dangling_context == NULL) return; - dc_resource_state_copy_construct(dc->current_state, dangling_context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { struct dc_stream_state *old_stream = dc->current_state->res_ctx.pipe_ctx[i].stream; @@ -1161,6 +1182,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) } if (should_disable && old_stream) { + bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM; pipe = &dc->current_state->res_ctx.pipe_ctx[i]; tg = pipe->stream_res.tg; /* When disabling plane for a phantom pipe, we must turn on the @@ -1169,22 +1191,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) * state that can result in underflow or hang when enabling it * again for different use. 
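 *
 * In outline, the teardown below is (each hook is optional per ASIC):
 *   1. blank the phantom OTG at the paired main pipe's dimensions
 *      (dc->hwss.blank_phantom)
 *   2. enable the phantom OTG (tg->funcs->enable_crtc)
 *   3. drop the phantom planes/stream from the dangling context
 *      (dc_state_rem_all_phantom_planes_for_stream)
 *   4. disable the phantom OTG again once programming completes
 *      (tg->funcs->disable_phantom_crtc)
 *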
*/ - if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (is_phantom) { if (tg->funcs->enable_crtc) { int main_pipe_width, main_pipe_height; + struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream); - main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width; - main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height; + main_pipe_width = old_paired_stream->dst.width; + main_pipe_height = old_paired_stream->dst.height; if (dc->hwss.blank_phantom) dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); tg->funcs->enable_crtc(tg); } } - dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); + + if (is_phantom) + dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true); + else + dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); + dc_update_visual_confirm_color(dc, context, pipe); + } if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); @@ -1203,7 +1232,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) * The OTG is set to disable on falling edge of VUPDATE so the plane disable * will still get its double buffer update. */ - if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (is_phantom) { if (tg->funcs->disable_phantom_crtc) tg->funcs->disable_phantom_crtc(tg); } @@ -1212,7 +1241,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) current_ctx = dc->current_state; dc->current_state = dangling_context; - dc_release_state(current_ctx); + dc_state_release(current_ctx); } static void disable_vbios_mode_if_required( @@ -1276,6 +1305,54 @@ static void disable_vbios_mode_if_required( } } +/** + * wait_for_blank_complete - wait for all active OPPs to finish pending blank + * pattern updates + * + * @dc: [in] dc reference + * @context: [in] hardware context in use + */ +static void wait_for_blank_complete(struct dc *dc, + struct dc_state *context) +{ + struct pipe_ctx *opp_head; + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (!hws->funcs.wait_for_blank_complete) + return; + + for (i = 0; i < MAX_PIPES; i++) { + opp_head = &context->res_ctx.pipe_ctx[i]; + + if (!resource_is_pipe_type(opp_head, OPP_HEAD) || + dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM) + continue; + + hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp); + } +} + +static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context) +{ + struct pipe_ctx *otg_master; + struct timing_generator *tg; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + otg_master = &context->res_ctx.pipe_ctx[i]; + if (!resource_is_pipe_type(otg_master, OTG_MASTER) || + dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM) + continue; + tg = otg_master->stream_res.tg; + if (tg->funcs->wait_odm_doublebuffer_pending_clear) + tg->funcs->wait_odm_doublebuffer_pending_clear(tg); + } + + /* An ODM update may require reprogramming the blank pattern for each OPP */ + wait_for_blank_complete(dc, context); +} + static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) { int i; @@ -1284,7 +1361,7 @@ static void wait_for_no_pipes_pending(struct dc
*dc, struct dc_state *context) int count = 0; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) continue; /* Timeout 100 ms */ @@ -1510,7 +1587,7 @@ static void program_timing_sync( } for (k = 0; k < group_size; k++) { - struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); + struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream); status->timing_sync_info.group_id = num_group; status->timing_sync_info.group_size = group_size; @@ -1521,7 +1598,7 @@ static void program_timing_sync( } - /* remove any other pipes that are already been synced */ + /* remove any other unblanked pipes as they have already been synced */ if (dc->config.use_pipe_ctx_sync_logic) { /* check pipe's syncd to decide which pipe to be removed */ for (j = 1; j < group_size; j++) { @@ -1534,6 +1611,7 @@ static void program_timing_sync( pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; } } else { + /* remove any other pipes by checking valid plane */ for (j = j + 1; j < group_size; j++) { bool is_blanked; @@ -1554,7 +1632,7 @@ static void program_timing_sync( if (group_size > 1) { if (sync_type == TIMING_SYNCHRONIZABLE) { dc->hwss.enable_timing_synchronization( - dc, group_index, group_size, pipe_set); + dc, ctx, group_index, group_size, pipe_set); } else if (sync_type == VBLANK_SYNCHRONIZABLE) { dc->hwss.enable_vblanks_synchronization( @@ -1759,6 +1837,8 @@ void dc_enable_stereo( int i, j; struct pipe_ctx *pipe; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (context != NULL) { pipe = &context->res_ctx.pipe_ctx[i]; @@ -1778,6 +1858,8 @@ void dc_enable_stereo( void dc_trigger_sync(struct dc *dc, struct dc_state *context) { if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { + dc_exit_ips_for_hw_access(dc); + enable_timing_multisync(dc, context); program_timing_sync(dc, context); } @@ -1836,7 +1918,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; /* Check old context for SubVP */ - subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); + subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); if (subvp_prev_use) break; } @@ -1962,6 +2044,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->stream_count == 0) { /* Must wait for no flips to be pending before doing optimize bw */ wait_for_no_pipes_pending(dc, context); + /* + * optimized dispclk depends on ODM setup. Need to wait for ODM + * update pending complete before optimizing bandwidth. 
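+ * wait_for_odm_update_pending_complete() polls each OTG master until the
+ * ODM double-buffer pending flag clears and then waits for the OPP blank
+ * pattern updates to finish, so the optimization below cannot race a
+ * mid-flight ODM reconfiguration.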
+ */ + wait_for_odm_update_pending_complete(dc, context); /* pplib is notified if disp_num changed */ dc->hwss.optimize_bandwidth(dc, context); /* Need to do otg sync again as otg could be out of sync due to otg @@ -1994,9 +2081,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c old_state = dc->current_state; dc->current_state = context; - dc_release_state(old_state); + dc_state_release(old_state); - dc_retain_state(dc->current_state); + dc_state_retain(dc->current_state); return result; } @@ -2034,6 +2121,8 @@ enum dc_status dc_commit_streams(struct dc *dc, if (!streams_changed(dc, streams, stream_count)) return res; + dc_exit_ips_for_hw_access(dc); + DC_LOG_DC("%s: %d streams\n", __func__, stream_count); for (i = 0; i < stream_count; i++) { @@ -2067,12 +2156,10 @@ enum dc_status dc_commit_streams(struct dc *dc, if (handle_exit_odm2to1) res = commit_minimal_transition_state(dc, dc->current_state); - context = dc_create_state(dc); + context = dc_state_create_current_copy(dc); if (!context) goto context_alloc_fail; - dc_resource_state_copy_construct_current(dc, context); - res = dc_validate_with_context(dc, set, stream_count, context, false); if (res != DC_OK) { BREAK_TO_DEBUGGER(); @@ -2087,7 +2174,7 @@ enum dc_status dc_commit_streams(struct dc *dc, streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; if (dc_is_embedded_signal(streams[i]->signal)) { - struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); + struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]); if (dc->hwss.is_abm_supported) status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); @@ -2098,7 +2185,7 @@ enum dc_status dc_commit_streams(struct dc *dc, } fail: - dc_release_state(context); + dc_state_release(context); context_alloc_fail: @@ -2152,7 +2239,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) pipe = &context->res_ctx.pipe_ctx[i]; // Don't check flip pending on phantom pipes - if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) + if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) continue; /* Must set to false to start with, due to OR in update function */ @@ -2210,7 +2297,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { context->res_ctx.pipe_ctx[i].pipe_idx = i; - dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]); } process_deferred_updates(dc); @@ -2222,104 +2309,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) } dc->optimized_required = false; - dc->wm_optimized_required = false; -} - -static void init_state(struct dc *dc, struct dc_state *context) -{ - /* Each context must have their own instance of VBA and in order to - * initialize and obtain IP and SOC the base DML instance from DC is - * initially copied into every context - */ - memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); -} - -struct dc_state *dc_create_state(struct dc *dc) -{ - struct dc_state *context = kvzalloc(sizeof(struct dc_state), - GFP_KERNEL); - - if (!context) - return NULL; - - init_state(dc, context); - -#ifdef CONFIG_DRM_AMD_DC_FP - if (dc->debug.using_dml2) { - dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2); - } -#endif - kref_init(&context->refcount); 
- - return context; -} - -struct dc_state *dc_copy_state(struct dc_state *src_ctx) -{ - int i, j; - struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); - - if (!new_ctx) - return NULL; - memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); - -#ifdef CONFIG_DRM_AMD_DC_FP - if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) { - dc_release_state(new_ctx); - return NULL; - } -#endif - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; - - if (cur_pipe->top_pipe) - cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; - - if (cur_pipe->bottom_pipe) - cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; - - if (cur_pipe->prev_odm_pipe) - cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; - - if (cur_pipe->next_odm_pipe) - cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; - - } - - for (i = 0; i < new_ctx->stream_count; i++) { - dc_stream_retain(new_ctx->streams[i]); - for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) - dc_plane_state_retain( - new_ctx->stream_status[i].plane_states[j]); - } - - kref_init(&new_ctx->refcount); - - return new_ctx; -} - -void dc_retain_state(struct dc_state *context) -{ - kref_get(&context->refcount); -} - -static void dc_state_free(struct kref *kref) -{ - struct dc_state *context = container_of(kref, struct dc_state, refcount); - dc_resource_state_destruct(context); - -#ifdef CONFIG_DRM_AMD_DC_FP - dml2_destroy(context->bw_ctx.dml2); - context->bw_ctx.dml2 = 0; -#endif - - kvfree(context); -} - -void dc_release_state(struct dc_state *context) -{ - kref_put(&context->refcount, dc_state_free); } bool dc_set_generic_gpio_for_stereo(bool enable, @@ -2742,8 +2731,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream( } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { dc->optimized_required = true; } - - dc->optimized_required |= dc->wm_optimized_required; } return type; @@ -2951,9 +2938,6 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vrr_active_fixed) stream->vrr_active_fixed = *update->vrr_active_fixed; - if (update->crtc_timing_adjust) - stream->adjust = *update->crtc_timing_adjust; - if (update->dpms_off) stream->dpms_off = *update->dpms_off; @@ -2994,11 +2978,9 @@ static void copy_stream_update_to_stream(struct dc *dc, update->dsc_config->num_slices_v != 0); /* Use temporary context for validating new DSC config */ - struct dc_state *dsc_validate_context = dc_create_state(dc); + struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state); if (dsc_validate_context) { - dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); - stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { @@ -3007,7 +2989,7 @@ static void copy_stream_update_to_stream(struct dc *dc, update->dsc_config = NULL; } - dc_release_state(dsc_validate_context); + dc_state_release(dsc_validate_context); } else { DC_ERROR("Failed to allocate new validate context for DSC change\n"); update->dsc_config = NULL; @@ -3106,30 +3088,27 @@ static bool update_planes_and_stream_state(struct dc *dc, new_planes[i] = srf_updates[i].surface; /* initialize scratch memory for building context */ -
context = dc_create_state(dc); + context = dc_state_create_copy(dc->current_state); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); return false; } - dc_resource_state_copy_construct( - dc->current_state, context); - /* For each full update, remove all existing phantom pipes first. * Ensures that we have enough pipes for newly added MPO planes */ - if (dc->res_pool->funcs->remove_phantom_pipes) - dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); /*remove old surfaces from context */ - if (!dc_rem_all_planes_for_stream(dc, stream, context)) { + if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) { BREAK_TO_DEBUGGER(); goto fail; } /* add surface to context */ - if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { + if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { BREAK_TO_DEBUGGER(); goto fail; @@ -3154,19 +3133,6 @@ static bool update_planes_and_stream_state(struct dc *dc, if (update_type == UPDATE_TYPE_FULL) { if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { - /* For phantom pipes we remove and create a new set of phantom pipes - * for each full update (because we don't know if we'll need phantom - * pipes until after the first round of validation). However, if validation - * fails we need to keep the existing phantom pipes (because we don't update - * the dc->current_state). - * - * The phantom stream/plane refcount is decremented for validation because - * we assume it'll be removed (the free comes when the dc_state is freed), - * but if validation fails we have to increment back the refcount so it's - * consistent. 
- */ - if (dc->res_pool->funcs->retain_phantom_pipes) - dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); BREAK_TO_DEBUGGER(); goto fail; } @@ -3187,7 +3153,7 @@ static bool update_planes_and_stream_state(struct dc *dc, return true; fail: - dc_release_state(context); + dc_state_release(context); return false; @@ -3488,18 +3454,26 @@ static void commit_planes_for_stream_fast(struct dc *dc, { int i, j; struct pipe_ctx *top_pipe_to_program = NULL; + struct dc_stream_status *stream_status = NULL; + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); - if (dc->debug.visual_confirm) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + if (!top_pipe_to_program) + return; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); + if (dc->debug.visual_confirm) + dc_update_visual_confirm_color(dc, context, pipe); } } @@ -3523,6 +3497,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, } } + stream_status = dc_state_get_stream_status(context, stream); + build_dmub_cmd_list(dc, srf_updates, surface_count, @@ -3535,7 +3511,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, context->dmub_cmd_count, context->block_sequence, &(context->block_sequence_steps), - top_pipe_to_program); + top_pipe_to_program, + stream_status); hwss_execute_sequence(dc, context->block_sequence, context->block_sequence_steps); @@ -3548,7 +3525,7 @@ static void commit_planes_for_stream_fast(struct dc *dc, top_pipe_to_program->stream->update_flags.raw = 0; } -static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) +static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context) { /* * This function calls HWSS to wait for any potentially double buffered @@ -3586,6 +3563,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state } } } + wait_for_odm_update_pending_complete(dc, dc_context); } static void commit_planes_for_stream(struct dc *dc, @@ -3607,6 +3585,8 @@ static void commit_planes_for_stream(struct dc *dc, // dc->current_state anymore, so we have to cache it before we apply // the new SubVP context subvp_prev_use = false; + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); if (update_type == UPDATE_TYPE_FULL) wait_for_outstanding_hw_updates(dc, context); @@ -3631,7 +3611,7 @@ static void commit_planes_for_stream(struct dc *dc, struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; // Check old context for SubVP - subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); + subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); if (subvp_prev_use) break; } @@ -3639,19 +3619,22 @@ static void commit_planes_for_stream(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { subvp_curr_use = true; break; } } - if (dc->debug.visual_confirm) - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = 
&context->res_ctx.pipe_ctx[i]; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); + if (dc->debug.visual_confirm) + dc_update_visual_confirm_color(dc, context, pipe); } + } if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; @@ -3918,7 +3901,9 @@ static void commit_planes_for_stream(struct dc *dc, * programming has completed (we turn on phantom OTG in order * to complete the plane disable for phantom pipes). */ - dc->hwss.apply_ctx_to_hw(dc, context); + + if (dc->hwss.disable_phantom_streams) + dc->hwss.disable_phantom_streams(dc, context); } if (update_type != UPDATE_TYPE_FAST) @@ -4024,7 +4009,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) { subvp_active = true; break; } @@ -4061,7 +4046,7 @@ struct pipe_split_policy_backup { static void release_minimal_transition_state(struct dc *dc, struct dc_state *context, struct pipe_split_policy_backup *policy) { - dc_release_state(context); + dc_state_release(context); /* restore previous pipe split and odm policy */ if (!dc->config.is_vmin_only_asic) dc->debug.pipe_split_policy = policy->mpc_policy; @@ -4072,7 +4057,7 @@ static void release_minimal_transition_state(struct dc *dc, static struct dc_state *create_minimal_transition_state(struct dc *dc, struct dc_state *base_context, struct pipe_split_policy_backup *policy) { - struct dc_state *minimal_transition_context = dc_create_state(dc); + struct dc_state *minimal_transition_context = NULL; unsigned int i, j; if (!dc->config.is_vmin_only_asic) { @@ -4084,7 +4069,9 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, policy->subvp_policy = dc->debug.force_disable_subvp; dc->debug.force_disable_subvp = true; - dc_resource_state_copy_construct(base_context, minimal_transition_context); + minimal_transition_context = dc_state_create_copy(base_context); + if (!minimal_transition_context) + return NULL; /* commit minimal state */ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { @@ -4116,7 +4103,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, bool success = false; struct dc_state *minimal_transition_context; struct pipe_split_policy_backup policy; - struct mall_temp_config mall_temp_config; /* commit based on new context */ /* Since all phantom pipes are removed in full validation, @@ -4125,8 +4111,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, * pipe as subvp/phantom will be cleared (dc copy constructor * creates a shallow copy). 
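 * (With phantom streams/planes now refcounted by the dc_state itself,
 * no separate MALL config save/restore or phantom refcount fix-up is
 * needed around this minimal transition anymore.)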
*/ - if (dc->res_pool->funcs->save_mall_state) - dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); minimal_transition_context = create_minimal_transition_state(dc, context, &policy); if (minimal_transition_context) { @@ -4139,16 +4123,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; } release_minimal_transition_state(dc, minimal_transition_context, &policy); - if (dc->res_pool->funcs->restore_mall_state) - dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); - /* If we do a minimal transition with plane removal and the context - * has subvp we also have to retain back the phantom stream / planes - * since the refcount is decremented as part of the min transition - * (we commit a state with no subvp, so the phantom streams / planes - * had to be removed). - */ - if (dc->res_pool->funcs->retain_phantom_pipes) - dc->res_pool->funcs->retain_phantom_pipes(dc, context); } if (!success) { @@ -4216,7 +4190,7 @@ static bool commit_minimal_transition_state(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { subvp_in_use = true; break; } @@ -4403,8 +4377,7 @@ static bool full_update_required(struct dc *dc, stream_update->mst_bw_update || stream_update->func_shaper || stream_update->lut3d_func || - stream_update->pending_test_pattern || - stream_update->crtc_timing_adjust)) + stream_update->pending_test_pattern)) return true; if (stream) { @@ -4484,7 +4457,6 @@ bool dc_update_planes_and_stream(struct dc *dc, struct dc_state *context; enum surface_update_type update_type; int i; - struct mall_temp_config mall_temp_config; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; /* In cases where MPO and split or ODM are used transitions can @@ -4495,6 +4467,8 @@ bool dc_update_planes_and_stream(struct dc *dc, bool is_plane_addition = 0; bool is_fast_update_only; + dc_exit_ips_for_hw_access(dc); + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream); @@ -4528,23 +4502,10 @@ bool dc_update_planes_and_stream(struct dc *dc, * pipe as subvp/phantom will be cleared (dc copy constructor * creates a shallow copy). */ - if (dc->res_pool->funcs->save_mall_state) - dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); if (!commit_minimal_transition_state(dc, context)) { - dc_release_state(context); + dc_state_release(context); return false; } - if (dc->res_pool->funcs->restore_mall_state) - dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); - - /* If we do a minimal transition with plane removal and the context - * has subvp we also have to retain back the phantom stream / planes - * since the refcount is decremented as part of the min transition - * (we commit a state with no subvp, so the phantom streams / planes - * had to be removed). 
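/*
 * Review sketch (condensed, hypothetical): with the MALL save/restore and
 * phantom-retain bookkeeping dropped above, a minimal transition reduces to
 * a plain create/commit/release pairing around the backed-up split policy:
 */
struct pipe_split_policy_backup policy;
struct dc_state *mts;
bool success = false;

mts = create_minimal_transition_state(dc, context, &policy);
if (mts) {
	success = dc_commit_state_no_check(dc, mts) == DC_OK;
	release_minimal_transition_state(dc, mts, &policy);
}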
- */ - if (dc->res_pool->funcs->retain_phantom_pipes) - dc->res_pool->funcs->retain_phantom_pipes(dc, context); update_type = UPDATE_TYPE_FULL; } @@ -4601,7 +4562,7 @@ bool dc_update_planes_and_stream(struct dc *dc, struct dc_state *old = dc->current_state; dc->current_state = context; - dc_release_state(old); + dc_state_release(old); // clear any forced full updates for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -4628,6 +4589,8 @@ void dc_commit_updates_for_stream(struct dc *dc, int i, j; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + dc_exit_ips_for_hw_access(dc); + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); stream_status = dc_stream_get_status(stream); context = dc->current_state; @@ -4660,14 +4623,12 @@ void dc_commit_updates_for_stream(struct dc *dc, if (update_type >= UPDATE_TYPE_FULL) { /* initialize scratch memory for building context */ - context = dc_create_state(dc); + context = dc_state_create_copy(state); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); return; } - dc_resource_state_copy_construct(state, context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -4706,7 +4667,7 @@ void dc_commit_updates_for_stream(struct dc *dc, if (update_type >= UPDATE_TYPE_FULL) { if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { DC_ERROR("Mode validation failed for stream update!\n"); - dc_release_state(context); + dc_state_release(context); return; } } @@ -4739,7 +4700,7 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_state *old = dc->current_state; dc->current_state = context; - dc_release_state(old); + dc_state_release(old); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -4812,7 +4773,9 @@ void dc_set_power_state( switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: - dc_resource_state_construct(dc, dc->current_state); + dc_state_construct(dc, dc->current_state); + + dc_exit_ips_for_hw_access(dc); dc_z10_restore(dc); @@ -4827,7 +4790,7 @@ void dc_set_power_state( default: ASSERT(dc->current_state->stream_count == 0); - dc_resource_state_destruct(dc->current_state); + dc_state_destruct(dc->current_state); break; } @@ -4904,6 +4867,38 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable) return true; } +/* enable/disable eDP Replay without specify stream for eDP */ +bool dc_set_replay_allow_active(struct dc *dc, bool active) +{ + int i; + bool allow_active; + + for (i = 0; i < dc->current_state->stream_count; i++) { + struct dc_link *link; + struct dc_stream_state *stream = dc->current_state->streams[i]; + + link = stream->link; + if (!link) + continue; + + if (link->replay_settings.replay_feature_enabled) { + if (active && !link->replay_settings.replay_allow_active) { + allow_active = true; + if (!dc_link_set_replay_allow_active(link, &allow_active, + false, false, NULL)) + return false; + } else if (!active && link->replay_settings.replay_allow_active) { + allow_active = false; + if (!dc_link_set_replay_allow_active(link, &allow_active, + true, false, NULL)) + return false; + } + } + } + + return true; +} + void dc_allow_idle_optimizations(struct dc *dc, bool allow) { if (dc->debug.disable_idle_power_optimizations) @@ -4923,6 +4918,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) dc->idle_optimizations_allowed = allow; } +void 
dc_exit_ips_for_hw_access(struct dc *dc) +{ + if (dc->caps.ips_support) + dc_allow_idle_optimizations(dc, false); +} + bool dc_dmub_is_ips_idle_state(struct dc *dc) { if (dc->debug.disable_idle_power_optimizations) @@ -5443,6 +5444,8 @@ bool dc_abm_save_restore( struct dc_link *link = stream->sink->link; struct dc_link *edp_links[MAX_NUM_EDP]; + if (link->replay_settings.replay_feature_enabled) + return false; /*find primary pipe associated with stream*/ for (i = 0; i < MAX_PIPES; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index fc18b9dc94..9c05b1a071 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -31,6 +31,7 @@ #include "basics/dc_common.h" #include "resource.h" #include "dc_dmub_srv.h" +#include "dc_state_priv.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) @@ -425,45 +426,130 @@ void get_hdr_visual_confirm_color( } void get_subvp_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; - bool enable_subvp = false; - int i; - - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context) - return; + if (pipe_ctx) { + switch (pipe_ctx->p_state_type) { + case P_STATE_SUB_VP: + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + break; + case P_STATE_DRR_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = 0; + break; + case P_STATE_V_BLANK_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb = color_value; + break; + default: + break; + } + } +} - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +void get_mclk_switch_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; - if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - /* SubVP enable - red */ - color->color_g_y = 0; + if (pipe_ctx) { + switch (pipe_ctx->p_state_type) { + case P_STATE_V_BLANK: + color->color_r_cr = color_value; + color->color_g_y = color_value; color->color_b_cb = 0; + break; + case P_STATE_FPO: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = color_value; + break; + case P_STATE_V_ACTIVE: color->color_r_cr = color_value; - enable_subvp = true; - - if (pipe_ctx->stream == pipe->stream) - return; + color->color_g_y = 0; + color->color_b_cb = color_value; + break; + case P_STATE_SUB_VP: + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + break; + case P_STATE_DRR_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = 0; + break; + case P_STATE_V_BLANK_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb = color_value; + break; + default: break; } } +} - if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { - color->color_r_cr = 0; - if (pipe_ctx->stream->allow_freesync == 1) { - /* SubVP enable and DRR on - green */ - color->color_b_cb = 0; - color->color_g_y = color_value; +void set_p_state_switch_method( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx) +{ + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + bool enable_subvp; + + if (!dc->ctx || !dc->ctx->dmub_srv || 
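/*
 * Review sketch: dc_exit_ips_for_hw_access() is the guard this patch adds
 * at every programming entry point -- leave IPS before touching registers,
 * as a no-op on ASICs without IPS support. Hypothetical caller, mirroring
 * commit_planes_for_stream_fast() above:
 */
static void program_hw_example(struct dc *dc)
{
	dc_exit_ips_for_hw_access(dc);	/* no-op unless dc->caps.ips_support */
	dc_z10_restore(dc);		/* bring display HW back from Z10 */

	/* ... register programming is now safe ... */
}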
!pipe_ctx || !vba || !context) + return; + + if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != + dm_dram_clock_change_unsupported) { + /* MCLK switching is supported */ + if (!pipe_ctx->has_vactive_margin) { + /* In Vblank - yellow */ + pipe_ctx->p_state_type = P_STATE_V_BLANK; + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + /* FPO + Vblank - cyan */ + pipe_ctx->p_state_type = P_STATE_FPO; + } } else { - /* SubVP enable and No DRR - blue */ - color->color_g_y = 0; - color->color_b_cb = color_value; + /* In Vactive - pink */ + pipe_ctx->p_state_type = P_STATE_V_ACTIVE; + } + + /* SubVP */ + enable_subvp = false; + + for (int i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && dc_state_get_paired_subvp_stream(context, pipe->stream) && + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + /* SubVP enable - red */ + pipe_ctx->p_state_type = P_STATE_SUB_VP; + enable_subvp = true; + + if (pipe_ctx->stream == pipe->stream) + return; + break; + } + } + + if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) { + if (pipe_ctx->stream->allow_freesync == 1) { + /* SubVP enable and DRR on - green */ + pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP; + } else { + /* SubVP enable and No DRR - blue */ + pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP; + } } } } @@ -473,7 +559,8 @@ void hwss_build_fast_sequence(struct dc *dc, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], int *num_steps, - struct pipe_ctx *pipe_ctx) + struct pipe_ctx *pipe_ctx, + struct dc_stream_status *stream_status) { struct dc_plane_state *plane = pipe_ctx->plane_state; struct dc_stream_state *stream = pipe_ctx->stream; @@ -490,7 +577,8 @@ void hwss_build_fast_sequence(struct dc *dc, if (dc->hwss.subvp_pipe_control_lock_fast) { block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true; - block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip = + plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN; block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } @@ -529,7 +617,7 @@ void hwss_build_fast_sequence(struct dc *dc, } if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) && - current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + stream_status->mall_stream_config.type == SUBVP_MAIN) { block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; block_sequence[*num_steps].params.subvp_save_surf_addr.addr = ¤t_mpc_pipe->plane_state->address; block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index; @@ -612,7 +700,8 @@ void hwss_build_fast_sequence(struct dc *dc, if (dc->hwss.subvp_pipe_control_lock_fast) { block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false; - block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip = + plane->flip_immediate && 
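/*
 * Review sketch: after this refactor the visual-confirm color is a pure
 * function of pipe_ctx->p_state_type, set once by set_p_state_switch_method().
 * The mapping encoded by the two hunks above, collected into one hypothetical
 * table (assumes the P_STATE_* enumerators are small, dense values):
 */
static const struct tg_color p_state_confirm_color[] = {
	[P_STATE_V_BLANK]        = { .color_r_cr = MAX_TG_COLOR_VALUE,
				     .color_g_y  = MAX_TG_COLOR_VALUE },	/* yellow */
	[P_STATE_FPO]            = { .color_g_y  = MAX_TG_COLOR_VALUE,
				     .color_b_cb = MAX_TG_COLOR_VALUE },	/* cyan */
	[P_STATE_V_ACTIVE]       = { .color_r_cr = MAX_TG_COLOR_VALUE,
				     .color_b_cb = MAX_TG_COLOR_VALUE },	/* pink */
	[P_STATE_SUB_VP]         = { .color_r_cr = MAX_TG_COLOR_VALUE },	/* red */
	[P_STATE_DRR_SUB_VP]     = { .color_g_y  = MAX_TG_COLOR_VALUE },	/* green */
	[P_STATE_V_BLANK_SUB_VP] = { .color_b_cb = MAX_TG_COLOR_VALUE },	/* blue */
};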
stream_status->mall_stream_config.type == SUBVP_MAIN; block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } @@ -812,42 +901,6 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params) dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index); } -void get_mclk_switch_visual_confirm_color( - struct dc *dc, - struct dc_state *context, - struct pipe_ctx *pipe_ctx, - struct tg_color *color) -{ - uint32_t color_value = MAX_TG_COLOR_VALUE; - struct vba_vars_st *vba = &context->bw_ctx.dml.vba; - - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) - return; - - if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != - dm_dram_clock_change_unsupported) { - /* MCLK switching is supported */ - if (!pipe_ctx->has_vactive_margin) { - /* In Vblank - yellow */ - color->color_r_cr = color_value; - color->color_g_y = color_value; - - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - /* FPO + Vblank - cyan */ - color->color_r_cr = 0; - color->color_g_y = color_value; - color->color_b_cb = color_value; - } - } else { - /* In Vactive - pink */ - color->color_r_cr = color_value; - color->color_b_cb = color_value; - } - /* SubVP */ - get_subvp_visual_confirm_color(dc, context, pipe_ctx, color); - } -} - void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index f365773d57..c6c35037bd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -467,6 +467,13 @@ bool dc_link_setup_psr(struct dc_link *link, return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context); } +bool dc_link_set_replay_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) +{ + return link->dc->link_srv->edp_set_replay_allow_active(link, allow_active, wait, + force_static, power_opts); +} + bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state) { return link->dc->link_srv->edp_get_replay_state(link, state); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 990d775e4c..9fbdb09697 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -42,6 +42,7 @@ #include "link_enc_cfg.h" #include "link.h" #include "clk_mgr.h" +#include "dc_state_priv.h" #include "virtual/virtual_link_hwss.h" #include "link/hwss/link_hwss_dio.h" #include "link/hwss/link_hwss_dpia.h" @@ -69,8 +70,8 @@ #include "dcn314/dcn314_resource.h" #include "dcn315/dcn315_resource.h" #include "dcn316/dcn316_resource.h" -#include "../dcn32/dcn32_resource.h" -#include "../dcn321/dcn321_resource.h" +#include "dcn32/dcn32_resource.h" +#include "dcn321/dcn321_resource.h" #include "dcn35/dcn35_resource.h" #define VISUAL_CONFIRM_BASE_DEFAULT 3 @@ -1764,6 +1765,29 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx( return free_pipe_idx; } +int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool) +{ + int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; + const struct pipe_ctx *new_pipe, *cur_pipe; + int i; + + for (i = 0; i < pool->pipe_count; i++) { + cur_pipe = &cur_res_ctx->pipe_ctx[i]; + new_pipe 
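/*
 * Review sketch (hypothetical caller) for the new link-level wrapper added
 * in dc_link_exports.c above; the argument pattern follows
 * dc_set_replay_allow_active():
 */
bool allow = true;

if (link->replay_settings.replay_feature_enabled &&
    !dc_link_set_replay_allow_active(link, &allow,
				     false /* wait */,
				     false /* force_static */,
				     NULL /* power_opts */))
	dm_error("DC: failed to enable replay on link %p!\n", link);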
= &new_res_ctx->pipe_ctx[i]; + + if (resource_is_pipe_type(cur_pipe, OTG_MASTER) && + resource_is_pipe_type(new_pipe, FREE_PIPE)) { + free_pipe_idx = i; + break; + } + } + + return free_pipe_idx; +} + int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, @@ -2440,6 +2464,9 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context, struct pipe_ctx *otg_master = resource_get_otg_master_for_stream( &context->res_ctx, stream); + if (!otg_master) + return; + ASSERT(resource_get_odm_slice_count(otg_master) == 1); ASSERT(otg_master->plane_state == NULL); ASSERT(otg_master->stream_res.stream_enc); @@ -2974,190 +3001,6 @@ bool resource_update_pipes_for_plane_with_slice_count( return result; } -bool dc_add_plane_to_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context) -{ - struct resource_pool *pool = dc->res_pool; - struct pipe_ctx *otg_master_pipe; - struct dc_stream_status *stream_status = NULL; - bool added = false; - - stream_status = dc_stream_get_status_from_state(context, stream); - if (stream_status == NULL) { - dm_error("Existing stream not found; failed to attach surface!\n"); - goto out; - } else if (stream_status->plane_count == MAX_SURFACE_NUM) { - dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", - plane_state, MAX_SURFACE_NUM); - goto out; - } - - otg_master_pipe = resource_get_otg_master_for_stream( - &context->res_ctx, stream); - if (otg_master_pipe) - added = resource_append_dpp_pipes_for_plane_composition(context, - dc->current_state, pool, otg_master_pipe, plane_state); - - if (added) { - stream_status->plane_states[stream_status->plane_count] = - plane_state; - stream_status->plane_count++; - dc_plane_state_retain(plane_state); - } - -out: - return added; -} - -bool dc_remove_plane_from_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context) -{ - int i; - struct dc_stream_status *stream_status = NULL; - struct resource_pool *pool = dc->res_pool; - - if (!plane_state) - return true; - - for (i = 0; i < context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; - break; - } - - if (stream_status == NULL) { - dm_error("Existing stream not found; failed to remove plane.\n"); - return false; - } - - resource_remove_dpp_pipes_for_plane_composition( - context, pool, plane_state); - - for (i = 0; i < stream_status->plane_count; i++) { - if (stream_status->plane_states[i] == plane_state) { - dc_plane_state_release(stream_status->plane_states[i]); - break; - } - } - - if (i == stream_status->plane_count) { - dm_error("Existing plane_state not found; failed to detach it!\n"); - return false; - } - - stream_status->plane_count--; - - /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */ - for (; i < stream_status->plane_count; i++) - stream_status->plane_states[i] = stream_status->plane_states[i + 1]; - - stream_status->plane_states[stream_status->plane_count] = NULL; - - if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) - /* ODM combine could prevent us from supporting more planes - * we will reset ODM slice count back to 1 when all planes have - * been removed to maximize the amount of planes supported when - * new planes are added. 
- */ - resource_update_pipes_for_stream_with_slice_count( - context, dc->current_state, dc->res_pool, stream, 1); - - return true; -} - -/** - * dc_rem_all_planes_for_stream - Remove planes attached to the target stream. - * - * @dc: Current dc state. - * @stream: Target stream, which we want to remove the attached plans. - * @context: New context. - * - * Return: - * Return true if DC was able to remove all planes from the target - * stream, otherwise, return false. - */ -bool dc_rem_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context) -{ - int i, old_plane_count; - struct dc_stream_status *stream_status = NULL; - struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; - - for (i = 0; i < context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; - break; - } - - if (stream_status == NULL) { - dm_error("Existing stream %p not found!\n", stream); - return false; - } - - old_plane_count = stream_status->plane_count; - - for (i = 0; i < old_plane_count; i++) - del_planes[i] = stream_status->plane_states[i]; - - for (i = 0; i < old_plane_count; i++) - if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context)) - return false; - - return true; -} - -static bool add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - const struct dc_validation_set set[], - int set_count, - struct dc_state *context) -{ - int i, j; - - for (i = 0; i < set_count; i++) - if (set[i].stream == stream) - break; - - if (i == set_count) { - dm_error("Stream %p not found in set!\n", stream); - return false; - } - - for (j = 0; j < set[i].plane_count; j++) - if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context)) - return false; - - return true; -} - -bool dc_add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state * const *plane_states, - int plane_count, - struct dc_state *context) -{ - struct dc_validation_set set; - int i; - - set.stream = stream; - set.plane_count = plane_count; - - for (i = 0; i < plane_count; i++) - set.plane_states[i] = plane_states[i]; - - return add_all_planes_for_stream(dc, stream, &set, 1, context); -} - bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream) { @@ -3309,84 +3152,6 @@ static struct audio *find_first_free_audio( return NULL; } -/* - * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state. - */ -enum dc_status dc_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream) -{ - enum dc_status res; - DC_LOGGER_INIT(dc->ctx->logger); - - if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) { - DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); - return DC_ERROR_UNEXPECTED; - } - - new_ctx->streams[new_ctx->stream_count] = stream; - dc_stream_retain(stream); - new_ctx->stream_count++; - - res = resource_add_otg_master_for_stream_output( - new_ctx, dc->res_pool, stream); - if (res != DC_OK) - DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); - - return res; -} - -/* - * dc_remove_stream_from_ctx() - Remove a stream from a dc_state. 
- */ -enum dc_status dc_remove_stream_from_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream) -{ - int i; - struct dc_context *dc_ctx = dc->ctx; - struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream( - &new_ctx->res_ctx, stream); - - if (!del_pipe) { - DC_ERROR("Pipe not found for stream %p !\n", stream); - return DC_ERROR_UNEXPECTED; - } - - resource_update_pipes_for_stream_with_slice_count(new_ctx, - dc->current_state, dc->res_pool, stream, 1); - resource_remove_otg_master_for_stream_output( - new_ctx, dc->res_pool, stream); - - for (i = 0; i < new_ctx->stream_count; i++) - if (new_ctx->streams[i] == stream) - break; - - if (new_ctx->streams[i] != stream) { - DC_ERROR("Context doesn't have stream %p !\n", stream); - return DC_ERROR_UNEXPECTED; - } - - dc_stream_release(new_ctx->streams[i]); - new_ctx->stream_count--; - - /* Trim back arrays */ - for (; i < new_ctx->stream_count; i++) { - new_ctx->streams[i] = new_ctx->streams[i + 1]; - new_ctx->stream_status[i] = new_ctx->stream_status[i + 1]; - } - - new_ctx->streams[new_ctx->stream_count] = NULL; - memset( - &new_ctx->stream_status[new_ctx->stream_count], - 0, - sizeof(new_ctx->stream_status[0])); - - return DC_OK; -} - static struct dc_stream_state *find_pll_sharable_stream( struct dc_stream_state *stream_needs_pll, struct dc_state *context) @@ -3594,6 +3359,7 @@ static void mark_seamless_boot_stream( * |________|_______________|___________|_____________| */ static bool acquire_otg_master_pipe_for_stream( + const struct dc_state *cur_ctx, struct dc_state *new_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) @@ -3607,7 +3373,22 @@ static bool acquire_otg_master_pipe_for_stream( int pipe_idx; struct pipe_ctx *pipe_ctx = NULL; - pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool); + /* + * Upper level code is responsible to optimize unnecessary addition and + * removal for unchanged streams. So unchanged stream will keep the same + * OTG master instance allocated. When current stream is removed and a + * new stream is added, we want to reuse the OTG instance made available + * by the removed stream first. If not found, we try to avoid of using + * any free pipes already used in current context as this could tear + * down exiting ODM/MPC/MPO configuration unnecessarily. 
+ */ + pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool); if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) { pipe_ctx = &new_ctx->res_ctx.pipe_ctx[pipe_idx]; memset(pipe_ctx, 0, sizeof(*pipe_ctx)); @@ -3667,7 +3448,7 @@ enum dc_status resource_map_pool_resources( if (!acquired) /* acquire new resources */ - acquired = acquire_otg_master_pipe_for_stream( + acquired = acquire_otg_master_pipe_for_stream(dc->current_state, context, pool, stream); pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); @@ -3750,35 +3531,6 @@ enum dc_status resource_map_pool_resources( return DC_ERROR_UNEXPECTED; } -/** - * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state - * - * @dc: copy out of dc->current_state - * @dst_ctx: copy into this - * - * This function makes a shallow copy of the current DC state and increments - * refcounts on existing streams and planes. - */ -void dc_resource_state_copy_construct_current( - const struct dc *dc, - struct dc_state *dst_ctx) -{ - dc_resource_state_copy_construct(dc->current_state, dst_ctx); -} - - -void dc_resource_state_construct( - const struct dc *dc, - struct dc_state *dst_ctx) -{ - dst_ctx->clk_mgr = dc->clk_mgr; - - /* Initialise DIG link encoder resource tracking variables. */ - if (dc->res_pool) - link_enc_cfg_init(dc, dst_ctx); -} - - bool dc_resource_is_dsc_encoding_supported(const struct dc *dc) { if (dc->res_pool == NULL) @@ -3822,6 +3574,31 @@ static bool planes_changed_for_existing_stream(struct dc_state *context, return false; } +static bool add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + const struct dc_validation_set set[], + int set_count, + struct dc_state *state) +{ + int i, j; + + for (i = 0; i < set_count; i++) + if (set[i].stream == stream) + break; + + if (i == set_count) { + dm_error("Stream %p not found in set!\n", stream); + return false; + } + + for (j = 0; j < set[i].plane_count; j++) + if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state)) + return false; + + return true; +} + /** * dc_validate_with_context - Validate and update the potential new stream in the context object * @@ -3927,7 +3704,8 @@ enum dc_status dc_validate_with_context(struct dc *dc, unchanged_streams[i], set, set_count)) { - if (!dc_rem_all_planes_for_stream(dc, + + if (!dc_state_rem_all_planes_for_stream(dc, unchanged_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; @@ -3949,12 +3727,24 @@ enum dc_status dc_validate_with_context(struct dc *dc, } } - if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { - res = DC_FAIL_DETACH_SURFACES; - goto fail; + if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) { + /* remove phantoms specifically */ + if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) { + res = DC_FAIL_DETACH_SURFACES; + goto fail; + } + + res = dc_state_remove_phantom_stream(dc, context, del_streams[i]); + dc_state_release_phantom_stream(dc, context, del_streams[i]); + } else { + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { + res = DC_FAIL_DETACH_SURFACES; + goto fail; + } + + res = dc_state_remove_stream(dc, context, 
del_streams[i]); } - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); if (res != DC_OK) goto fail; } @@ -3977,7 +3767,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, /* Add new streams and then add all planes for the new stream */ for (i = 0; i < add_streams_count; i++) { calculate_phy_pix_clks(add_streams[i]); - res = dc_add_stream_to_ctx(dc, context, add_streams[i]); + res = dc_state_add_stream(dc, context, add_streams[i]); if (res != DC_OK) goto fail; @@ -4483,84 +4273,6 @@ static void set_vtem_info_packet( *info_packet = stream->vtem_infopacket; } -void dc_resource_state_destruct(struct dc_state *context) -{ - int i, j; - - for (i = 0; i < context->stream_count; i++) { - for (j = 0; j < context->stream_status[i].plane_count; j++) - dc_plane_state_release( - context->stream_status[i].plane_states[j]); - - context->stream_status[i].plane_count = 0; - dc_stream_release(context->streams[i]); - context->streams[i] = NULL; - } - context->stream_count = 0; - context->stream_mask = 0; - memset(&context->res_ctx, 0, sizeof(context->res_ctx)); - memset(&context->pp_display_cfg, 0, sizeof(context->pp_display_cfg)); - memset(&context->dcn_bw_vars, 0, sizeof(context->dcn_bw_vars)); - context->clk_mgr = NULL; - memset(&context->bw_ctx.bw, 0, sizeof(context->bw_ctx.bw)); - memset(context->block_sequence, 0, sizeof(context->block_sequence)); - context->block_sequence_steps = 0; - memset(context->dc_dmub_cmd, 0, sizeof(context->dc_dmub_cmd)); - context->dmub_cmd_count = 0; - memset(&context->perf_params, 0, sizeof(context->perf_params)); - memset(&context->scratch, 0, sizeof(context->scratch)); -} - -void dc_resource_state_copy_construct( - const struct dc_state *src_ctx, - struct dc_state *dst_ctx) -{ - int i, j; - struct kref refcount = dst_ctx->refcount; -#ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_context *dml2 = NULL; - - // Need to preserve allocated dml2 context - if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2) - dml2 = dst_ctx->bw_ctx.dml2; -#endif - - *dst_ctx = *src_ctx; - -#ifdef CONFIG_DRM_AMD_DC_FP - // Preserve allocated dml2 context - if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2) - dst_ctx->bw_ctx.dml2 = dml2; -#endif - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i]; - - if (cur_pipe->top_pipe) - cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; - - if (cur_pipe->bottom_pipe) - cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; - - if (cur_pipe->next_odm_pipe) - cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; - - if (cur_pipe->prev_odm_pipe) - cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; - } - - for (i = 0; i < dst_ctx->stream_count; i++) { - dc_stream_retain(dst_ctx->streams[i]); - for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++) - dc_plane_state_retain( - dst_ctx->stream_status[i].plane_states[j]); - } - - /* context refcount should not be overridden */ - dst_ctx->refcount = refcount; - -} - struct clock_source *dc_resource_find_first_free_pll( struct resource_context *res_ctx, const struct resource_pool *pool) @@ -4740,7 +4452,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, option = DITHER_OPTION_SPATIAL8; break; case COLOR_DEPTH_101010: - option = DITHER_OPTION_SPATIAL10; + option = DITHER_OPTION_TRUN10; break; default: option = DITHER_OPTION_DISABLE; @@ -4766,6 +4478,8 @@ void 
resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + if (option == DITHER_OPTION_TRUN10) + fmt_bit_depth->flags.TRUNCATE_MODE = 1; } /* special case - Formatter can only reduce by 4 bits at most. @@ -5283,7 +4997,7 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return true; - else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 2160 && + else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c new file mode 100644 index 0000000000..61986e5cb4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -0,0 +1,880 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
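/*
 * Review note on the resource_build_bit_depth_reduction_params() hunks above:
 * the default 10-bit reduction switches from spatial dithering to truncation,
 * and TRUN10 now also selects a truncate mode. Net effect, sketched (the
 * TRUNCATE_* flag semantics belong to the FMT hardware and are not restated
 * here):
 */
if (option == DITHER_OPTION_TRUN10) {		/* was DITHER_OPTION_SPATIAL10 */
	fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
	fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;	/* 10 bpc */
	fmt_bit_depth->flags.TRUNCATE_MODE = 1;		/* new with this patch */
}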
+ * + * Authors: AMD + * + */ +#include "core_types.h" +#include "core_status.h" +#include "dc_state.h" +#include "dc_state_priv.h" +#include "dc_stream_priv.h" +#include "dc_plane_priv.h" + +#include "dm_services.h" +#include "resource.h" +#include "link_enc_cfg.h" + +#include "dml2/dml2_wrapper.h" +#include "dml2/dml2_internal_types.h" + +#define DC_LOGGER \ + dc->ctx->logger +#define DC_LOGGER_INIT(logger) + +/* Private dc_state helper functions */ +static bool dc_state_track_phantom_stream(struct dc_state *state, + struct dc_stream_state *phantom_stream) +{ + if (state->phantom_stream_count >= MAX_PHANTOM_PIPES) + return false; + + state->phantom_streams[state->phantom_stream_count++] = phantom_stream; + + return true; +} + +static bool dc_state_untrack_phantom_stream(struct dc_state *state, struct dc_stream_state *phantom_stream) +{ + bool res = false; + int i; + + /* first find phantom stream in the dc_state */ + for (i = 0; i < state->phantom_stream_count; i++) { + if (state->phantom_streams[i] == phantom_stream) { + state->phantom_streams[i] = NULL; + res = true; + break; + } + } + + /* failed to find stream in state */ + if (!res) + return res; + + /* trim back phantom streams */ + state->phantom_stream_count--; + for (; i < state->phantom_stream_count; i++) + state->phantom_streams[i] = state->phantom_streams[i + 1]; + + return res; +} + +static bool dc_state_is_phantom_stream_tracked(struct dc_state *state, struct dc_stream_state *phantom_stream) +{ + int i; + + for (i = 0; i < state->phantom_stream_count; i++) { + if (state->phantom_streams[i] == phantom_stream) + return true; + } + + return false; +} + +static bool dc_state_track_phantom_plane(struct dc_state *state, + struct dc_plane_state *phantom_plane) +{ + if (state->phantom_plane_count >= MAX_PHANTOM_PIPES) + return false; + + state->phantom_planes[state->phantom_plane_count++] = phantom_plane; + + return true; +} + +static bool dc_state_untrack_phantom_plane(struct dc_state *state, struct dc_plane_state *phantom_plane) +{ + bool res = false; + int i; + + /* first find phantom plane in the dc_state */ + for (i = 0; i < state->phantom_plane_count; i++) { + if (state->phantom_planes[i] == phantom_plane) { + state->phantom_planes[i] = NULL; + res = true; + break; + } + } + + /* failed to find plane in state */ + if (!res) + return res; + + /* trim back phantom planes */ + state->phantom_plane_count--; + for (; i < state->phantom_plane_count; i++) + state->phantom_planes[i] = state->phantom_planes[i + 1]; + + return res; +} + +static bool dc_state_is_phantom_plane_tracked(struct dc_state *state, struct dc_plane_state *phantom_plane) +{ + int i; + + for (i = 0; i < state->phantom_plane_count; i++) { + if (state->phantom_planes[i] == phantom_plane) + return true; + } + + return false; +} + +static void dc_state_copy_internal(struct dc_state *dst_state, struct dc_state *src_state) +{ + int i, j; + + memcpy(dst_state, src_state, sizeof(struct dc_state)); + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *cur_pipe = &dst_state->res_ctx.pipe_ctx[i]; + + if (cur_pipe->top_pipe) + cur_pipe->top_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; + + if (cur_pipe->bottom_pipe) + cur_pipe->bottom_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; + + if (cur_pipe->prev_odm_pipe) + cur_pipe->prev_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; + + if (cur_pipe->next_odm_pipe) + cur_pipe->next_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; + 
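/*
 * Review sketch: the phantom track/untrack helpers above use a bounded
 * pointer array with an order-preserving "close the gap" trim -- the same
 * idiom the stream/plane arrays use elsewhere in this file. Distilled into
 * a hypothetical standalone helper:
 */
static bool untrack_ptr(void **arr, int *count, void *elem)
{
	int i;

	for (i = 0; i < *count; i++)
		if (arr[i] == elem)
			break;

	if (i == *count)
		return false;		/* not tracked */

	(*count)--;
	for (; i < *count; i++)		/* shift the tail down one slot */
		arr[i] = arr[i + 1];
	arr[*count] = NULL;

	return true;
}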
} + + /* retain phantoms */ + for (i = 0; i < dst_state->phantom_stream_count; i++) + dc_stream_retain(dst_state->phantom_streams[i]); + + for (i = 0; i < dst_state->phantom_plane_count; i++) + dc_plane_state_retain(dst_state->phantom_planes[i]); + + /* retain streams and planes */ + for (i = 0; i < dst_state->stream_count; i++) { + dc_stream_retain(dst_state->streams[i]); + for (j = 0; j < dst_state->stream_status[i].plane_count; j++) + dc_plane_state_retain( + dst_state->stream_status[i].plane_states[j]); + } + +} + +static void init_state(struct dc *dc, struct dc_state *state) +{ + /* Each context must have their own instance of VBA and in order to + * initialize and obtain IP and SOC the base DML instance from DC is + * initially copied into every context + */ + memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); +} + +/* Public dc_state functions */ +struct dc_state *dc_state_create(struct dc *dc) +{ + struct dc_state *state = kvzalloc(sizeof(struct dc_state), + GFP_KERNEL); + + if (!state) + return NULL; + + init_state(dc, state); + dc_state_construct(dc, state); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (dc->debug.using_dml2) + dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2); +#endif + + kref_init(&state->refcount); + + return state; +} + +void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state) +{ + struct kref refcount = dst_state->refcount; +#ifdef CONFIG_DRM_AMD_DC_FP + struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2; +#endif + + dc_state_copy_internal(dst_state, src_state); + +#ifdef CONFIG_DRM_AMD_DC_FP + dst_state->bw_ctx.dml2 = dst_dml2; + if (src_state->bw_ctx.dml2) + dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2); +#endif + + /* context refcount should not be overridden */ + dst_state->refcount = refcount; +} + +struct dc_state *dc_state_create_copy(struct dc_state *src_state) +{ + struct dc_state *new_state; + + new_state = kvmalloc(sizeof(struct dc_state), + GFP_KERNEL); + if (!new_state) + return NULL; + + dc_state_copy_internal(new_state, src_state); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (src_state->bw_ctx.dml2 && + !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) { + dc_state_release(new_state); + return NULL; + } +#endif + + kref_init(&new_state->refcount); + + return new_state; +} + +void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state) +{ + dc_state_copy(dst_state, dc->current_state); +} + +struct dc_state *dc_state_create_current_copy(struct dc *dc) +{ + return dc_state_create_copy(dc->current_state); +} + +void dc_state_construct(struct dc *dc, struct dc_state *state) +{ + state->clk_mgr = dc->clk_mgr; + + /* Initialise DIG link encoder resource tracking variables. 
*/ + if (dc->res_pool) + link_enc_cfg_init(dc, state); +} + +void dc_state_destruct(struct dc_state *state) +{ + int i, j; + + for (i = 0; i < state->stream_count; i++) { + for (j = 0; j < state->stream_status[i].plane_count; j++) + dc_plane_state_release( + state->stream_status[i].plane_states[j]); + + state->stream_status[i].plane_count = 0; + dc_stream_release(state->streams[i]); + state->streams[i] = NULL; + } + state->stream_count = 0; + + /* release tracked phantoms */ + for (i = 0; i < state->phantom_stream_count; i++) { + dc_stream_release(state->phantom_streams[i]); + state->phantom_streams[i] = NULL; + } + state->phantom_stream_count = 0; + + for (i = 0; i < state->phantom_plane_count; i++) { + dc_plane_state_release(state->phantom_planes[i]); + state->phantom_planes[i] = NULL; + } + state->phantom_plane_count = 0; + + state->stream_mask = 0; + memset(&state->res_ctx, 0, sizeof(state->res_ctx)); + memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg)); + memset(&state->dcn_bw_vars, 0, sizeof(state->dcn_bw_vars)); + state->clk_mgr = NULL; + memset(&state->bw_ctx.bw, 0, sizeof(state->bw_ctx.bw)); + memset(state->block_sequence, 0, sizeof(state->block_sequence)); + state->block_sequence_steps = 0; + memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd)); + state->dmub_cmd_count = 0; + memset(&state->perf_params, 0, sizeof(state->perf_params)); + memset(&state->scratch, 0, sizeof(state->scratch)); +} + +void dc_state_retain(struct dc_state *state) +{ + kref_get(&state->refcount); +} + +static void dc_state_free(struct kref *kref) +{ + struct dc_state *state = container_of(kref, struct dc_state, refcount); + + dc_state_destruct(state); + +#ifdef CONFIG_DRM_AMD_DC_FP + dml2_destroy(state->bw_ctx.dml2); + state->bw_ctx.dml2 = 0; +#endif + + kvfree(state); +} + +void dc_state_release(struct dc_state *state) +{ + if (state != NULL) + kref_put(&state->refcount, dc_state_free); +} +/* + * dc_state_add_stream() - Add a new dc_stream_state to a dc_state. + */ +enum dc_status dc_state_add_stream( + struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream) +{ + enum dc_status res; + + DC_LOGGER_INIT(dc->ctx->logger); + + if (state->stream_count >= dc->res_pool->timing_generator_count) { + DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + state->streams[state->stream_count] = stream; + dc_stream_retain(stream); + state->stream_count++; + + res = resource_add_otg_master_for_stream_output( + state, dc->res_pool, stream); + if (res != DC_OK) + DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); + + return res; +} + +/* + * dc_state_remove_stream() - Remove a stream from a dc_state. 
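/*
 * Review sketch (hypothetical caller): the kref-based lifecycle these
 * helpers implement; the final dc_state_release() frees the state through
 * dc_state_free():
 */
static enum dc_status commit_copy_example(struct dc *dc)
{
	struct dc_state *new_ctx = dc_state_create_current_copy(dc);
	struct dc_state *old;

	if (!new_ctx)
		return DC_ERROR_UNEXPECTED;

	/* ... mutate via dc_state_add_stream()/dc_state_add_plane(),
	 * validate, program the hardware ... */

	old = dc->current_state;	/* publish the new state, then drop */
	dc->current_state = new_ctx;	/* the old reference -- the pattern */
	dc_state_release(old);		/* used throughout dc.c above */

	return DC_OK;
}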
+ */ +enum dc_status dc_state_remove_stream( + struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream) +{ + int i; + struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream( + &state->res_ctx, stream); + + if (!del_pipe) { + dm_error("Pipe not found for stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + resource_update_pipes_for_stream_with_slice_count(state, + dc->current_state, dc->res_pool, stream, 1); + resource_remove_otg_master_for_stream_output( + state, dc->res_pool, stream); + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) + break; + + if (state->streams[i] != stream) { + dm_error("Context doesn't have stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + dc_stream_release(state->streams[i]); + state->stream_count--; + + /* Trim back arrays */ + for (; i < state->stream_count; i++) { + state->streams[i] = state->streams[i + 1]; + state->stream_status[i] = state->stream_status[i + 1]; + } + + state->streams[state->stream_count] = NULL; + memset( + &state->stream_status[state->stream_count], + 0, + sizeof(state->stream_status[0])); + + return DC_OK; +} + +bool dc_state_add_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state) +{ + struct resource_pool *pool = dc->res_pool; + struct pipe_ctx *otg_master_pipe; + struct dc_stream_status *stream_status = NULL; + bool added = false; + + stream_status = dc_state_get_stream_status(state, stream); + if (stream_status == NULL) { + dm_error("Existing stream not found; failed to attach surface!\n"); + goto out; + } else if (stream_status->plane_count == MAX_SURFACE_NUM) { + dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", + plane_state, MAX_SURFACE_NUM); + goto out; + } + + if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) + /* ODM combine could prevent us from supporting more planes + * we will reset ODM slice count back to 1 when all planes have + * been removed to maximize the amount of planes supported when + * new planes are added. 
+ */ + resource_update_pipes_for_stream_with_slice_count( + state, dc->current_state, dc->res_pool, stream, 1); + + otg_master_pipe = resource_get_otg_master_for_stream( + &state->res_ctx, stream); + if (otg_master_pipe) + added = resource_append_dpp_pipes_for_plane_composition(state, + dc->current_state, pool, otg_master_pipe, plane_state); + + if (added) { + stream_status->plane_states[stream_status->plane_count] = + plane_state; + stream_status->plane_count++; + dc_plane_state_retain(plane_state); + } + +out: + return added; +} + +bool dc_state_remove_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state) +{ + int i; + struct dc_stream_status *stream_status = NULL; + struct resource_pool *pool = dc->res_pool; + + if (!plane_state) + return true; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) { + stream_status = &state->stream_status[i]; + break; + } + + if (stream_status == NULL) { + dm_error("Existing stream not found; failed to remove plane.\n"); + return false; + } + + resource_remove_dpp_pipes_for_plane_composition( + state, pool, plane_state); + + for (i = 0; i < stream_status->plane_count; i++) { + if (stream_status->plane_states[i] == plane_state) { + dc_plane_state_release(stream_status->plane_states[i]); + break; + } + } + + if (i == stream_status->plane_count) { + dm_error("Existing plane_state not found; failed to detach it!\n"); + return false; + } + + stream_status->plane_count--; + + /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */ + for (; i < stream_status->plane_count; i++) + stream_status->plane_states[i] = stream_status->plane_states[i + 1]; + + stream_status->plane_states[stream_status->plane_count] = NULL; + + if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) + /* ODM combine could prevent us from supporting more planes + * we will reset ODM slice count back to 1 when all planes have + * been removed to maximize the amount of planes supported when + * new planes are added. + */ + resource_update_pipes_for_stream_with_slice_count( + state, dc->current_state, dc->res_pool, stream, 1); + + return true; +} + +/** + * dc_state_rem_all_planes_for_stream - Remove planes attached to the target stream. + * + * @dc: Current dc state. + * @stream: Target stream, which we want to remove the attached plans. + * @state: context from which the planes are to be removed. + * + * Return: + * Return true if DC was able to remove all planes from the target + * stream, otherwise, return false. 
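/*
 * Review note: unlike the removed dc_add_plane_to_context(), the new
 * dc_state_add_plane() above also resets the ODM slice count to 1 before
 * the first plane is attached (matching the reset already done on removal),
 * so windowed MPO/ODM cannot inherit a stale slice count. Hypothetical call
 * sites are otherwise unchanged:
 */
if (!dc_state_add_plane(dc, stream, plane_state, state))
	goto fail;	/* stream not in state, or MAX_SURFACE_NUM reached */

if (!dc_state_remove_plane(dc, stream, plane_state, state))
	goto fail;	/* plane was not attached to this stream */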
+ */ +bool dc_state_rem_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_state *state) +{ + int i, old_plane_count; + struct dc_stream_status *stream_status = NULL; + struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) { + stream_status = &state->stream_status[i]; + break; + } + + if (stream_status == NULL) { + dm_error("Existing stream %p not found!\n", stream); + return false; + } + + old_plane_count = stream_status->plane_count; + + for (i = 0; i < old_plane_count; i++) + del_planes[i] = stream_status->plane_states[i]; + + for (i = 0; i < old_plane_count; i++) + if (!dc_state_remove_plane(dc, stream, del_planes[i], state)) + return false; + + return true; +} + +bool dc_state_add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state * const *plane_states, + int plane_count, + struct dc_state *state) +{ + int i; + bool result = true; + + for (i = 0; i < plane_count; i++) + if (!dc_state_add_plane(dc, stream, plane_states[i], state)) { + result = false; + break; + } + + return result; +} + +/* Private dc_state functions */ + +/** + * dc_state_get_stream_status - Get stream status from given dc state + * @state: DC state to find the stream status in + * @stream: The stream to get the stream status for + * + * The given stream is expected to exist in the given dc state. Otherwise, NULL + * will be returned. + */ +struct dc_stream_status *dc_state_get_stream_status( + struct dc_state *state, + struct dc_stream_state *stream) +{ + uint8_t i; + + if (state == NULL) + return NULL; + + for (i = 0; i < state->stream_count; i++) { + if (stream == state->streams[i]) + return &state->stream_status[i]; + } + + return NULL; +} + +enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state, + const struct pipe_ctx *pipe_ctx) +{ + return dc_state_get_stream_subvp_type(state, pipe_ctx->stream); +} + +enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state, + const struct dc_stream_state *stream) +{ + int i; + + enum mall_stream_type type = SUBVP_NONE; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] == stream) { + type = state->stream_status[i].mall_stream_config.type; + break; + } + } + + return type; +} + +struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state, + const struct dc_stream_state *stream) +{ + int i; + + struct dc_stream_state *paired_stream = NULL; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] == stream) { + paired_stream = state->stream_status[i].mall_stream_config.paired_stream; + break; + } + } + + return paired_stream; +} + +struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *main_stream) +{ + struct dc_stream_state *phantom_stream; + + DC_LOGGER_INIT(dc->ctx->logger); + + phantom_stream = dc_create_stream_for_sink(main_stream->sink); + + if (!phantom_stream) { + DC_LOG_ERROR("Failed to allocate phantom stream.\n"); + return NULL; + } + + /* track phantom stream in dc_state */ + dc_state_track_phantom_stream(state, phantom_stream); + + phantom_stream->is_phantom = true; + phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; + phantom_stream->dpms_off = true; + + return phantom_stream; +} + +void dc_state_release_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream) +{ 
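/*
 * Review sketch (hypothetical caller): the intended subvp phantom flow under
 * the new API -- create and track the phantom, pair it with its main stream
 * via dc_state_add_phantom_stream() (defined below), and on teardown remove
 * it from the state before dropping the reference, the order
 * dc_validate_with_context() follows above:
 */
struct dc_stream_state *phantom =
	dc_state_create_phantom_stream(dc, state, main_stream);

if (phantom) {
	/* sets SUBVP_PHANTOM/SUBVP_MAIN and pairs both stream_statuses */
	dc_state_add_phantom_stream(dc, state, phantom, main_stream);

	/* ... later, on removal ... */
	dc_state_remove_phantom_stream(dc, state, phantom);
	dc_state_release_phantom_stream(dc, state, phantom);
}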
+ DC_LOGGER_INIT(dc->ctx->logger); + + if (!dc_state_untrack_phantom_stream(state, phantom_stream)) { + DC_LOG_ERROR("Failed to free phantom stream %p in dc state %p.\n", phantom_stream, state); + return; + } + + dc_stream_release(phantom_stream); +} + +struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, + struct dc_state *state, + struct dc_plane_state *main_plane) +{ + struct dc_plane_state *phantom_plane = dc_create_plane_state(dc); + + DC_LOGGER_INIT(dc->ctx->logger); + + if (!phantom_plane) { + DC_LOG_ERROR("Failed to allocate phantom plane.\n"); + return NULL; + } + + /* track phantom inside dc_state */ + dc_state_track_phantom_plane(state, phantom_plane); + + phantom_plane->is_phantom = true; + + return phantom_plane; +} + +void dc_state_release_phantom_plane(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *phantom_plane) +{ + DC_LOGGER_INIT(dc->ctx->logger); + + if (!dc_state_untrack_phantom_plane(state, phantom_plane)) { + DC_LOG_ERROR("Failed to free phantom plane %p in dc state %p.\n", phantom_plane, state); + return; + } + + dc_plane_state_release(phantom_plane); +} + +/* add phantom streams to context and generate correct meta inside dc_state */ +enum dc_status dc_state_add_phantom_stream(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream, + struct dc_stream_state *main_stream) +{ + struct dc_stream_status *main_stream_status; + struct dc_stream_status *phantom_stream_status; + enum dc_status res = dc_state_add_stream(dc, state, phantom_stream); + + /* check if stream is tracked */ + if (res == DC_OK && !dc_state_is_phantom_stream_tracked(state, phantom_stream)) { + /* stream must be tracked if added to state */ + dc_state_track_phantom_stream(state, phantom_stream); + } + + /* setup subvp meta */ + main_stream_status = dc_state_get_stream_status(state, main_stream); + phantom_stream_status = dc_state_get_stream_status(state, phantom_stream); + phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM; + phantom_stream_status->mall_stream_config.paired_stream = main_stream; + main_stream_status->mall_stream_config.type = SUBVP_MAIN; + main_stream_status->mall_stream_config.paired_stream = phantom_stream; + + return res; +} + +enum dc_status dc_state_remove_phantom_stream(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream) +{ + struct dc_stream_status *main_stream_status; + struct dc_stream_status *phantom_stream_status; + + /* reset subvp meta */ + phantom_stream_status = dc_state_get_stream_status(state, phantom_stream); + main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream); + phantom_stream_status->mall_stream_config.type = SUBVP_NONE; + phantom_stream_status->mall_stream_config.paired_stream = NULL; + if (main_stream_status) { + main_stream_status->mall_stream_config.type = SUBVP_NONE; + main_stream_status->mall_stream_config.paired_stream = NULL; + } + + /* remove stream from state */ + return dc_state_remove_stream(dc, state, phantom_stream); +} + +bool dc_state_add_phantom_plane( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state *phantom_plane, + struct dc_state *state) +{ + bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state); + + /* check if stream is tracked */ + if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane)) { + /* stream must be tracked if added to state */ + dc_state_track_phantom_plane(state, phantom_plane); + } + + return 
res; +} + +bool dc_state_remove_phantom_plane( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state *phantom_plane, + struct dc_state *state) +{ + return dc_state_remove_plane(dc, phantom_stream, phantom_plane, state); +} + +bool dc_state_rem_all_phantom_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_state *state, + bool should_release_planes) +{ + int i, old_plane_count; + struct dc_stream_status *stream_status = NULL; + struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == phantom_stream) { + stream_status = &state->stream_status[i]; + break; + } + + if (stream_status == NULL) { + dm_error("Existing stream %p not found!\n", phantom_stream); + return false; + } + + old_plane_count = stream_status->plane_count; + + for (i = 0; i < old_plane_count; i++) + del_planes[i] = stream_status->plane_states[i]; + + for (i = 0; i < old_plane_count; i++) { + if (!dc_state_remove_plane(dc, phantom_stream, del_planes[i], state)) + return false; + if (should_release_planes) + dc_state_release_phantom_plane(dc, state, del_planes[i]); + } + + return true; +} + +bool dc_state_add_all_phantom_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state * const *phantom_planes, + int plane_count, + struct dc_state *state) +{ + return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state); +} + +bool dc_state_remove_phantom_streams_and_planes( + struct dc *dc, + struct dc_state *state) +{ + int i; + bool removed_phantom = false; + struct dc_stream_state *phantom_stream = NULL; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { + phantom_stream = pipe->stream; + + dc_state_rem_all_phantom_planes_for_stream(dc, phantom_stream, state, false); + dc_state_remove_phantom_stream(dc, state, phantom_stream); + removed_phantom = true; + } + } + return removed_phantom; +} + +void dc_state_release_phantom_streams_and_planes( + struct dc *dc, + struct dc_state *state) +{ + int i; + + for (i = 0; i < state->phantom_stream_count; i++) + dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]); + + for (i = 0; i < state->phantom_plane_count; i++) + dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 4bdf105d1d..51a970fcb5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -31,6 +31,8 @@ #include "ipp.h" #include "timing_generator.h" #include "dc_dmub_srv.h" +#include "dc_state_priv.h" +#include "dc_stream_priv.h" #define DC_LOGGER dc->ctx->logger @@ -54,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) } } -static bool dc_stream_construct(struct dc_stream_state *stream, +bool dc_stream_construct(struct dc_stream_state *stream, struct dc_sink *dc_sink_data) { uint32_t i = 0; @@ -121,13 +123,12 @@ static bool dc_stream_construct(struct dc_stream_state *stream, } stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->stream_id = stream->ctx->dc_stream_id_count; - stream->ctx->dc_stream_id_count++; + dc_stream_assign_stream_id(stream); return true; } -static void 
dc_stream_destruct(struct dc_stream_state *stream) +void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); if (stream->out_transfer_func != NULL) { @@ -136,6 +137,13 @@ static void dc_stream_destruct(struct dc_stream_state *stream) } } +void dc_stream_assign_stream_id(struct dc_stream_state *stream) +{ + /* MSB is reserved to indicate phantoms */ + stream->stream_id = stream->ctx->dc_stream_id_count; + stream->ctx->dc_stream_id_count++; +} + void dc_stream_retain(struct dc_stream_state *stream) { kref_get(&stream->refcount); @@ -196,8 +204,7 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) if (new_stream->out_transfer_func) dc_transfer_func_retain(new_stream->out_transfer_func); - new_stream->stream_id = new_stream->ctx->dc_stream_id_count; - new_stream->ctx->dc_stream_id_count++; + dc_stream_assign_stream_id(new_stream); /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign) @@ -209,31 +216,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) } /** - * dc_stream_get_status_from_state - Get stream status from given dc state - * @state: DC state to find the stream status in - * @stream: The stream to get the stream status for - * - * The given stream is expected to exist in the given dc state. Otherwise, NULL - * will be returned. - */ -struct dc_stream_status *dc_stream_get_status_from_state( - struct dc_state *state, - struct dc_stream_state *stream) -{ - uint8_t i; - - if (state == NULL) - return NULL; - - for (i = 0; i < state->stream_count; i++) { - if (stream == state->streams[i]) - return &state->stream_status[i]; - } - - return NULL; -} - -/** * dc_stream_get_status() - Get current stream status of the given stream state * @stream: The stream to get the stream status for. 
* @@ -244,7 +226,7 @@ struct dc_stream_status *dc_stream_get_status( struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; - return dc_stream_get_status_from_state(dc->current_state, stream); + return dc_state_get_stream_status(dc->current_state, stream); } static void program_cursor_attributes( @@ -441,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc, return false; } + dc_exit_ips_for_hw_access(dc); + wb_info->dwb_params.out_transfer_func = stream->out_transfer_func; dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; @@ -465,16 +449,37 @@ bool dc_stream_add_writeback(struct dc *dc, if (dc->hwss.enable_writeback) { struct dc_stream_status *stream_status = dc_stream_get_status(stream); struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; - dwb->otg_inst = stream_status->primary_otg_inst; + if (stream_status) + dwb->otg_inst = stream_status->primary_otg_inst; + } + + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } + + /* enable writeback */ + if (dc->hwss.enable_writeback) { + struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + + if (dwb->funcs->is_enabled(dwb)) { + /* writeback pipe already enabled, only need to update */ + dc->hwss.update_writeback(dc, wb_info, dc->current_state); + } else { + /* Enable writeback pipe from scratch*/ + dc->hwss.enable_writeback(dc, wb_info, dc->current_state); + } } + return true; } -bool dc_stream_remove_writeback(struct dc *dc, +bool dc_stream_fc_disable_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst) { - int i = 0, j = 0; + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; + if (stream == NULL) { dm_error("DC: dc_stream is NULL!\n"); return false; @@ -490,27 +495,67 @@ bool dc_stream_remove_writeback(struct dc *dc, return false; } -// stream->writeback_info[dwb_pipe_inst].wb_enabled = false; - for (i = 0; i < stream->num_wb_info; i++) { - /*dynamic update*/ - if (stream->writeback_info[i].wb_enabled && - stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) { - stream->writeback_info[i].wb_enabled = false; - } + dc_exit_ips_for_hw_access(dc); + + if (dwb->funcs->set_fc_enable) + dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE); + + return true; +} + +bool dc_stream_remove_writeback(struct dc *dc, + struct dc_stream_state *stream, + uint32_t dwb_pipe_inst) +{ + int i = 0, j = 0; + if (stream == NULL) { + dm_error("DC: dc_stream is NULL!\n"); + return false; + } + + if (dwb_pipe_inst >= MAX_DWB_PIPES) { + dm_error("DC: writeback pipe is invalid!\n"); + return false; + } + + if (stream->num_wb_info > MAX_DWB_PIPES) { + dm_error("DC: num_wb_info is invalid!\n"); + return false; } /* remove writeback info for disabled writeback pipes from stream */ for (i = 0, j = 0; i < stream->num_wb_info; i++) { if (stream->writeback_info[i].wb_enabled) { - if (j < i) - /* trim the array */ + + if (stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) + stream->writeback_info[i].wb_enabled = false; + + /* trim the array */ + if (j < i) { memcpy(&stream->writeback_info[j], &stream->writeback_info[i], sizeof(struct dc_writeback_info)); - j++; + j++; + } } } stream->num_wb_info = j; + /* recalculate and apply DML parameters */ + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } + + dc_exit_ips_for_hw_access(dc); + + /* disable writeback */ + if (dc->hwss.disable_writeback) { + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; + + if 
(dwb->funcs->is_enabled(dwb)) + dc->hwss.disable_writeback(dc, dwb_pipe_inst); + } + return true; } @@ -518,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc, int num_dwb, struct dc_writeback_info *wb_info) { + dc_exit_ips_for_hw_access(dc); + if (dc->hwss.mmhubbub_warmup) return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); else @@ -530,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) struct resource_context *res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -558,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream, dc = stream->ctx->dc; res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; @@ -589,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, struct resource_context *res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -625,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) if (i == MAX_PIPES) return true; + dc_exit_ips_for_hw_access(dc); + return dc->hwss.dmdata_status_done(pipe); } @@ -659,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; + dc_exit_ips_for_hw_access(dc); + dc->hwss.program_dmdata_engine(pipe_ctx); if (hubp->funcs->dmdata_set_attributes != NULL && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index a80e453007..19140fb657 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -32,10 +32,12 @@ #include "transform.h" #include "dpp.h" +#include "dc_plane_priv.h" + /******************************************************************************* * Private functions ******************************************************************************/ -static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) +void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) { plane_state->ctx = ctx; @@ -63,7 +65,7 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl } -static void dc_plane_destruct(struct dc_plane_state *plane_state) +void dc_plane_destruct(struct dc_plane_state *plane_state) { if (plane_state->gamma_correction != NULL) { dc_gamma_release(&plane_state->gamma_correction); @@ -159,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status( break; } + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 8164a53404..fc60fa5814 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -27,6 +27,8 @@ #define DC_INTERFACE_H_ #include "dc_types.h" +#include "dc_state.h" +#include "dc_plane.h" #include "grph_object_defs.h" #include "logger_types.h" #include "hdcp_msg_types.h" @@ -49,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.259" +#define DC_VER "3.2.266" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -432,6 +434,7 @@ 
struct dc_config { bool EnableMinDispClkODM; bool enable_auto_dpm_test_logs; unsigned int disable_ips; + unsigned int disable_ips_in_vpb; }; enum visual_confirm { @@ -461,6 +464,12 @@ enum dml_hostvm_override_opts { DML_HOSTVM_OVERRIDE_TRUE = 0x2, }; +enum dc_replay_power_opts { + replay_power_opt_invalid = 0x0, + replay_power_opt_smu_opt_static_screen = 0x1, + replay_power_opt_z10_static_screen = 0x10, +}; + enum dcc_option { DCC_ENABLE = 0, DCC_DISABLE = 1, @@ -956,7 +965,6 @@ struct dc_debug_options { unsigned int min_prefetch_in_strobe_ns; bool disable_unbounded_requesting; bool dig_fifo_off_in_blank; - bool temp_mst_deallocation_sequence; bool override_dispclk_programming; bool otg_crc_db; bool disallow_dispclk_dppclk_ds; @@ -979,6 +987,10 @@ struct dc_debug_options { bool psp_disabled_wa; unsigned int ips2_eval_delay_us; unsigned int ips2_entry_delay_us; + bool disable_dmub_reallow_idle; + bool disable_timeout; + bool disable_extblankadj; + unsigned int static_screen_wait_frames; }; struct gpu_info_soc_bounding_box_v1_0; @@ -1026,7 +1038,6 @@ struct dc { /* Require to optimize clocks and bandwidth for added/removed planes */ bool optimized_required; - bool wm_optimized_required; bool idle_optimizations_allowed; bool enable_c20_dtm_b0; @@ -1389,13 +1400,6 @@ struct dc_surface_update { /* * Create a new surface with default parameters; */ -struct dc_plane_state *dc_create_plane_state(struct dc *dc); -const struct dc_plane_status *dc_plane_get_status( - const struct dc_plane_state *plane_state); - -void dc_plane_state_retain(struct dc_plane_state *plane_state); -void dc_plane_state_release(struct dc_plane_state *plane_state); - void dc_gamma_retain(struct dc_gamma *dc_gamma); void dc_gamma_release(struct dc_gamma **dc_gamma); struct dc_gamma *dc_create_gamma(void); @@ -1459,37 +1463,20 @@ enum dc_status dc_validate_global_state( struct dc_state *new_ctx, bool fast_validate); - -void dc_resource_state_construct( - const struct dc *dc, - struct dc_state *dst_ctx); - bool dc_acquire_release_mpc_3dlut( struct dc *dc, bool acquire, struct dc_stream_state *stream, struct dc_3dlut **lut, struct dc_transfer_func **shaper); -void dc_resource_state_copy_construct( - const struct dc_state *src_ctx, - struct dc_state *dst_ctx); - -void dc_resource_state_copy_construct_current( - const struct dc *dc, - struct dc_state *dst_ctx); - -void dc_resource_state_destruct(struct dc_state *context); - bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); +void get_audio_check(struct audio_info *aud_modes, + struct audio_check *aud_chk); enum dc_status dc_commit_streams(struct dc *dc, struct dc_stream_state *streams[], uint8_t stream_count); -struct dc_state *dc_create_state(struct dc *dc); -struct dc_state *dc_copy_state(struct dc_state *src_ctx); -void dc_retain_state(struct dc_state *context); -void dc_release_state(struct dc_state *context); struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, struct dc_stream_state *stream, @@ -1541,7 +1528,13 @@ struct dc_link { bool is_dig_mapping_flexible; bool hpd_status; /* HPD status of link without physical HPD pin. */ bool is_hpd_pending; /* Indicates a new received hpd */ - bool is_automated; /* Indicates automated testing */ + + /* USB4 DPIA links skip verifying link cap, instead performing the fallback method + * for every link training. This is incompatible with DP LL compliance automation, + * which expects the same link settings to be used every retry on a link loss. 
+ * This flag is used to skip the fallback when link loss occurs during automation. + */ + bool skip_fallback_on_link_loss; bool edp_sink_present; @@ -2092,6 +2085,20 @@ bool dc_link_setup_psr(struct dc_link *dc_link, const struct dc_stream_state *stream, struct psr_config *psr_config, struct psr_context *psr_context); +/* + * Communicate with DMUB to allow or disallow Panel Replay on the specified link: + * + * @link: pointer to the dc_link struct instance + * @enable: enable (active) or disable (inactive) replay + * @wait: whether to wait until the transition to the active state has completed. + * @force_static: force disable (inactive) replay + * @power_opts: power optimization parameters to set in DMUB. + * + * return: true if Replay active was successfully set, false otherwise. + */ +bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable, + bool wait, bool force_static, const unsigned int *power_opts); + bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state); /* On eDP links this function call will stall until T12 has elapsed. @@ -2318,6 +2325,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_ struct dc_cursor_attributes *cursor_attr); void dc_allow_idle_optimizations(struct dc *dc, bool allow); +void dc_exit_ips_for_hw_access(struct dc *dc); bool dc_dmub_is_ips_idle_state(struct dc *dc); /* set min and max memory clock to lowest and highest DPM level, respectively */ @@ -2336,6 +2344,9 @@ void dc_hardware_release(struct dc *dc); void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc); bool dc_set_psr_allow_active(struct dc *dc, bool enable); + +bool dc_set_replay_allow_active(struct dc *dc, bool active); + void dc_z10_restore(const struct dc *dc); void dc_z10_save_init(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 05b3433cbb..9084b32084 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -33,6 +33,7 @@ #include "cursor_reg_cache.h" #include "resource.h" #include "clk_mgr.h" +#include "dc_state_priv.h" #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger @@ -141,7 +142,10 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */ - dmub_srv_cmd_execute(dmub); + status = dmub_srv_cmd_execute(dmub); + if (status == DMUB_STATUS_POWER_STATE_D3) + return false; + dmub_srv_wait_for_idle(dmub, 100000); /* Requeue the command. 
*/ @@ -149,16 +153,20 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, } if (status != DMUB_STATUS_OK) { - DC_ERROR("Error queueing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } } status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } @@ -219,7 +227,10 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */ - dmub_srv_cmd_execute(dmub); + status = dmub_srv_cmd_execute(dmub); + if (status == DMUB_STATUS_POWER_STATE_D3) + return false; + dmub_srv_wait_for_idle(dmub, 100000); /* Requeue the command. */ @@ -227,22 +238,31 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun } if (status != DMUB_STATUS_OK) { - DC_ERROR("Error queueing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } } status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } // Wait for DMUB to process command if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { - status = dmub_srv_wait_for_idle(dmub, 100000); + if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (status != DMUB_STATUS_OK); + } else + status = dmub_srv_wait_for_idle(dmub, 100000); if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); @@ -500,10 +520,11 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi /** * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command * - * @dc: [in] current dc state + * @dc: [in] pointer to dc object * @subvp_pipe: [in] pipe_ctx for the SubVP pipe * @vblank_pipe: [in] pipe_ctx for the DRR pipe * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info + * @context: [in] DC state for access to phantom stream * * Populate the DMCUB SubVP command with DRR pipe info. All the information * required for calculating the SubVP + DRR microschedule is populated here. @@ -514,12 +535,14 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi * 3. 
Populate the drr_info with the min and max supported vtotal values */ static void populate_subvp_cmd_drr_info(struct dc *dc, + struct dc_state *context, struct pipe_ctx *subvp_pipe, struct pipe_ctx *vblank_pipe, struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data) { + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; - struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + struct dc_crtc_timing *phantom_timing = &phantom_stream->timing; struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing; uint16_t drr_frame_us = 0; uint16_t min_drr_supported_us = 0; @@ -607,7 +630,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) break; } @@ -624,7 +647,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, if (vblank_pipe->stream->ignore_msa_timing_param && (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed)) - populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data); + populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data); } /** @@ -649,10 +672,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc, uint32_t subvp0_prefetch_us = 0; uint32_t subvp1_prefetch_us = 0; uint32_t prefetch_delta_us = 0; - struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing; - struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing; + struct dc_stream_state *phantom_stream0 = NULL; + struct dc_stream_state *phantom_stream1 = NULL; + struct dc_crtc_timing *phantom_timing0 = NULL; + struct dc_crtc_timing *phantom_timing1 = NULL; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL; + phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream); + phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream); + phantom_timing0 = &phantom_stream0->timing; + phantom_timing1 = &phantom_stream1->timing; + subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) * (uint64_t)phantom_timing0->h_total * 1000000), (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); @@ -702,8 +732,9 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, uint32_t j; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; - struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + struct dc_crtc_timing *phantom_timing = &phantom_stream->timing; uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den; pipe_data->mode = SUBVP; @@ -757,7 +788,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; - if (phantom_pipe->stream == 
subvp_pipe->stream->mall_stream_config.paired_stream) { + if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) { pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst; if (phantom_pipe->bottom_pipe) { pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst; @@ -791,6 +822,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2]; uint32_t wm_val_refclk = 0; + enum mall_stream_type pipe_mall_type; memset(&cmd, 0, sizeof(cmd)); // FW command for SUBVP @@ -806,7 +838,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) subvp_pipes[subvp_count++] = pipe; } @@ -814,6 +846,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; @@ -824,12 +857,11 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.paired_stream && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + pipe_mall_type == SUBVP_MAIN) { populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } else if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where // we run through DML without calculating "natural" P-state support populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); @@ -1142,10 +1174,16 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) dc_ctx = dc_dmub_srv->ctx; if (wait) { - status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); - if (status != DMUB_STATUS_OK) { - DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status); - return false; + if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { + do { + status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); + } while (status != DMUB_STATUS_OK); + } else { + status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status); + return false; + } } } else return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub); @@ -1175,22 +1213,18 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) } /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + /* We also do not perform a wait since DMCUB could enter idle after the notification. 
*/ + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { - const uint32_t max_num_polls = 10000; uint32_t allow_state = 0; uint32_t commit_state = 0; - uint32_t i; if (dc->debug.dmcub_emulation) return; - if (!dc->idle_optimizations_allowed) - return; - if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) return; @@ -1203,8 +1237,16 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) { // Wait for evaluation time - udelay(dc->debug.ips2_eval_delay_us); - commit_state = dc->hwss.get_idle_state(dc); + for (;;) { + udelay(dc->debug.ips2_eval_delay_us); + commit_state = dc->hwss.get_idle_state(dc); + if (commit_state & DMUB_IPS2_ALLOW_MASK) + break; + + /* allow was still set, retry eval delay */ + dc->hwss.set_idle_state(dc, false); + } + if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) { // Tell PMFW to exit low power state dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); @@ -1213,14 +1255,13 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) udelay(dc->debug.ips2_entry_delay_us); dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - for (i = 0; i < max_num_polls; ++i) { + for (;;) { commit_state = dc->hwss.get_idle_state(dc); if (commit_state & DMUB_IPS2_COMMIT_MASK) break; udelay(1); } - ASSERT(i < max_num_polls); if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); @@ -1235,14 +1276,13 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) dc_dmub_srv_notify_idle(dc, false); if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) { - for (i = 0; i < max_num_polls; ++i) { + for (;;) { commit_state = dc->hwss.get_idle_state(dc); if (commit_state & DMUB_IPS1_COMMIT_MASK) break; udelay(1); } - ASSERT(i < max_num_polls); } } @@ -1324,7 +1364,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in else result = dm_execute_dmub_cmd(ctx, cmd, wait_type); - if (result && reallow_idle) + if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; @@ -1373,7 +1413,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); - if (result && reallow_idle) + if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h new file mode 100644 index 0000000000..ef380cae81 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -0,0 +1,38 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_PLANE_H_ +#define _DC_PLANE_H_ + +#include "dc.h" +#include "dc_hw_types.h" + +struct dc_plane_state *dc_create_plane_state(struct dc *dc); +const struct dc_plane_status *dc_plane_get_status( + const struct dc_plane_state *plane_state); +void dc_plane_state_retain(struct dc_plane_state *plane_state); +void dc_plane_state_release(struct dc_plane_state *plane_state); + +#endif /* _DC_PLANE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h new file mode 100644 index 0000000000..9ee184c1df --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h @@ -0,0 +1,34 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_PLANE_PRIV_H_ +#define _DC_PLANE_PRIV_H_ + +#include "dc_plane.h" + +void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state); +void dc_plane_destruct(struct dc_plane_state *plane_state); + +#endif /* _DC_PLANE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h new file mode 100644 index 0000000000..d167fdbfa8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -0,0 +1,78 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_STATE_H_ +#define _DC_STATE_H_ + +#include "dc.h" +#include "inc/core_status.h" + +struct dc_state *dc_state_create(struct dc *dc); +void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state); +struct dc_state *dc_state_create_copy(struct dc_state *src_state); +void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state); +struct dc_state *dc_state_create_current_copy(struct dc *dc); +void dc_state_construct(struct dc *dc, struct dc_state *state); +void dc_state_destruct(struct dc_state *state); +void dc_state_retain(struct dc_state *state); +void dc_state_release(struct dc_state *state); + +enum dc_status dc_state_add_stream(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + +enum dc_status dc_state_remove_stream( + struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + +bool dc_state_add_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state); + +bool dc_state_remove_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state); + +bool dc_state_rem_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_state *state); + +bool dc_state_add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state * const *plane_states, + int plane_count, + struct dc_state *state); + +struct dc_stream_status *dc_state_get_stream_status( + struct dc_state *state, + struct dc_stream_state *stream); +#endif /* _DC_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h new file mode 100644 index 0000000000..c1f44e09a6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h @@ -0,0 +1,102 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_STATE_PRIV_H_ +#define _DC_STATE_PRIV_H_ + +#include "dc_state.h" +#include "dc_stream.h" + +/* Get the type of the provided resource (none, phantom, main) based on the provided + * context. If the context is unavailable, determine only if phantom or not. + */ +enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state, + const struct pipe_ctx *pipe_ctx); +enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state, + const struct dc_stream_state *stream); + +/* Gets the phantom stream if main is provided, gets the main if phantom is provided. */ +struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state, + const struct dc_stream_state *stream); + +/* allocates a phantom stream or plane and returns a pointer to the object */ +struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *main_stream); +struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, + struct dc_state *state, + struct dc_plane_state *main_plane); + +/* deallocates a phantom stream or plane */ +void dc_state_release_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream); +void dc_state_release_phantom_plane(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *phantom_plane); + +/* add/remove a phantom stream to/from the context and generate subvp meta data */ +enum dc_status dc_state_add_phantom_stream(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream, + struct dc_stream_state *main_stream); +enum dc_status dc_state_remove_phantom_stream(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream); + +bool dc_state_add_phantom_plane( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state *phantom_plane, + struct dc_state *state); + +bool dc_state_remove_phantom_plane( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state *phantom_plane, + struct dc_state *state); + +bool dc_state_rem_all_phantom_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_state *state, + bool should_release_planes); + +bool dc_state_add_all_phantom_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state * const *phantom_planes, + int plane_count, + struct dc_state *state); + +bool dc_state_remove_phantom_streams_and_planes( + struct dc *dc, + struct dc_state *state); + +void dc_state_release_phantom_streams_and_planes( + struct dc *dc, + struct dc_state *state); + +#endif /* _DC_STATE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index e61eea6db2..a23eebd993 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -38,6 +38,14 @@ struct timing_sync_info { bool master; }; +struct mall_stream_config { + /* MALL stream config to indicate if the stream is phantom or not. + * We will use a phantom stream to indicate that the pipe is phantom. 
+ */ + enum mall_stream_type type; + struct dc_stream_state *paired_stream; // master / slave stream +}; + struct dc_stream_status { int primary_otg_inst; int stream_enc_inst; @@ -50,6 +58,7 @@ struct dc_stream_status { struct timing_sync_info timing_sync_info; struct dc_plane_state *plane_states[MAX_SURFACE_NUM]; bool is_abm_supported; + struct mall_stream_config mall_stream_config; }; enum hubp_dmdata_mode { @@ -130,7 +139,6 @@ union stream_update_flags { uint32_t wb_update:1; uint32_t dsc_changed : 1; uint32_t mst_bw : 1; - uint32_t crtc_timing_adjust : 1; uint32_t fams_changed : 1; } bits; @@ -147,31 +155,6 @@ struct test_pattern { #define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR) -enum mall_stream_type { - SUBVP_NONE, // subvp not in use - SUBVP_MAIN, // subvp in use, this stream is main stream - SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream -}; - -struct mall_stream_config { - /* MALL stream config to indicate if the stream is phantom or not. - * We will use a phantom stream to indicate that the pipe is phantom. - */ - enum mall_stream_type type; - struct dc_stream_state *paired_stream; // master / slave stream -}; - -/* Temp struct used to save and restore MALL config - * during validation. - * - * TODO: Move MALL config into dc_state instead of stream struct - * to avoid needing to save/restore. - */ -struct mall_temp_config { - struct mall_stream_config mall_stream_config[MAX_PIPES]; - bool is_phantom_plane[MAX_PIPES]; -}; - struct dc_stream_debug_options { char force_odm_combine_segments; }; @@ -301,7 +284,7 @@ struct dc_stream_state { bool has_non_synchronizable_pclk; bool vblank_synchronized; bool fpo_in_use; - struct mall_stream_config mall_stream_config; + bool is_phantom; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -342,7 +325,6 @@ struct dc_stream_update { struct dc_3dlut *lut3d_func; struct test_pattern *pending_test_pattern; - struct dc_crtc_timing_adjust *crtc_timing_adjust; }; bool dc_is_stream_unchanged( @@ -415,45 +397,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, uint32_t *h_position, uint32_t *v_position); -enum dc_status dc_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream); - -enum dc_status dc_remove_stream_from_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream); - - -bool dc_add_plane_to_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context); - -bool dc_remove_plane_from_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context); - -bool dc_rem_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context); - -bool dc_add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state * const *plane_states, - int plane_count, - struct dc_state *context); - bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info); +bool dc_stream_fc_disable_writeback(struct dc *dc, + struct dc_stream_state *stream, + uint32_t dwb_pipe_inst); + bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst); @@ -514,9 +465,6 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); void dc_stream_release(struct dc_stream_state 
*dc_stream); -struct dc_stream_status *dc_stream_get_status_from_state( - struct dc_state *state, - struct dc_stream_state *stream); struct dc_stream_status *dc_stream_get_status( struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h new file mode 100644 index 0000000000..7476fd52ce --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h @@ -0,0 +1,37 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_STREAM_PRIV_H_ +#define _DC_STREAM_PRIV_H_ + +#include "dc_stream.h" + +bool dc_stream_construct(struct dc_stream_state *stream, + struct dc_sink *dc_sink_data); +void dc_stream_destruct(struct dc_stream_state *stream); + +void dc_stream_assign_stream_id(struct dc_stream_state *stream); + +#endif // _DC_STREAM_PRIV_H_ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 66d0774bef..be2ac5c442 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -1018,6 +1018,25 @@ enum replay_coasting_vtotal_type { PR_COASTING_TYPE_NUM, }; +enum replay_link_off_frame_count_level { + PR_LINK_OFF_FRAME_COUNT_FAIL = 0x0, + PR_LINK_OFF_FRAME_COUNT_GOOD = 0x2, + PR_LINK_OFF_FRAME_COUNT_BEST = 0x6, +}; + +/* + * General interface for Replay to set a + * 32-bit variable in DMUB. The message type + * indicates which variable is passed to DMUB. + */ +enum replay_FW_Message_type { + Replay_Msg_Not_Support = -1, + Replay_Set_Timing_Sync_Supported, + Replay_Set_Residency_Frameupdate_Timer, + Replay_Set_Pseudo_VTotal, +}; + union replay_error_status { struct { unsigned char STATE_TRANSITION_ERROR :1; @@ -1029,26 +1048,52 @@ union replay_error_status { }; struct replay_config { - bool replay_supported; // Replay feature is supported - unsigned int replay_power_opt_supported; // Power opt flags that are supported - bool replay_smu_opt_supported; // SMU optimization is supported - unsigned int replay_enable_option; // Replay enablement option - uint32_t debug_flags; // Replay debug flags - bool replay_timing_sync_supported; // Replay desync is supported - bool force_disable_desync_error_check; // Replay desync is supported - bool received_desync_error_hpd; //Replay Received Desync Error HPD. 
- union replay_error_status replay_error_status; // Replay error status -}; - -/* Replay feature flags */ + /* Replay feature is supported */ + bool replay_supported; + /* Power opt flags that are supported */ + unsigned int replay_power_opt_supported; + /* SMU optimization is supported */ + bool replay_smu_opt_supported; + /* Replay enablement option */ + unsigned int replay_enable_option; + /* Replay debug flags */ + uint32_t debug_flags; + /* Replay timing sync is supported */ + bool replay_timing_sync_supported; + /* Disable Replay desync error check. */ + bool force_disable_desync_error_check; + /* Replay Received Desync Error HPD. */ + bool received_desync_error_hpd; + /* Replay fast resync in ultra sleep mode (long vblank) is supported */ + bool replay_support_fast_resync_in_ultra_sleep_mode; + /* Replay error status */ + union replay_error_status replay_error_status; +}; + +/* Replay feature flags */ struct replay_settings { - struct replay_config config; // Replay configuration - bool replay_feature_enabled; // Replay feature is ready for activating - bool replay_allow_active; // Replay is currently active - unsigned int replay_power_opt_active; // Power opt flags that are activated currently - bool replay_smu_opt_enable; // SMU optimization is enabled - uint16_t coasting_vtotal; // Current Coasting vtotal - uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table + /* Replay configuration */ + struct replay_config config; + /* Replay feature is ready for activating */ + bool replay_feature_enabled; + /* Replay is currently active */ + bool replay_allow_active; + /* Replay is allowed with long vblank */ + bool replay_allow_long_vblank; + /* Power opt flags that are activated currently */ + unsigned int replay_power_opt_active; + /* SMU optimization is enabled */ + bool replay_smu_opt_enable; + /* Current Coasting vtotal */ + uint32_t coasting_vtotal; + /* Coasting vtotal table */ + uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; + /* Maximum link off frame count */ + enum replay_link_off_frame_count_level link_off_frame_count_level; + /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */ + uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal; + /* Replay last pseudo vtotal set to DMUB */ + uint16_t last_pseudo_vtotal; }; /* To split out "global" and "per-panel" config settings. 
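/*
 * [Editor's example] A minimal sketch, not part of this patch, of how a
 * caller might combine the dc_replay_power_opts flags declared in dc.h
 * above with the dc_link_set_replay_allow_active() interface this patch
 * adds. The helper name and the specific flag combination are assumptions
 * for illustration; only the enum values and the function signature come
 * from the patch.
 */
static bool example_activate_replay(struct dc_link *link)
{
	const bool enable = true;
	/* OR together the supported power optimizations from dc_replay_power_opts */
	const unsigned int power_opts = replay_power_opt_smu_opt_static_screen |
					replay_power_opt_z10_static_screen;

	/* wait = false: do not block on the transition to active;
	 * force_static = false: regular (non-forced) activation
	 */
	return dc_link_set_replay_allow_active(link, &enable, false, false,
					       &power_opts);
}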
@@ -1125,4 +1170,9 @@ enum dc_hpd_enable_select { HPD_EN_FOR_SECONDARY_EDP_ONLY, }; +enum mall_stream_type { + SUBVP_NONE, // subvp not in use + SUBVP_MAIN, // subvp in use, this stream is main stream + SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream +}; #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 874b132fe1..a600677633 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -135,7 +135,7 @@ static void dmcu_set_backlight_level( 0, 1, 80000); } -static void dce_abm_init(struct abm *abm, uint32_t backlight) +static void dce_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); @@ -162,7 +162,7 @@ static void dce_abm_init(struct abm *abm, uint32_t backlight) BL1_PWM_TARGET_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_USER_LEVEL, - BL1_PWM_USER_LEVEL, backlight); + BL1_PWM_USER_LEVEL, user_level); REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 930fd929e9..ccc154b028 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -57,18 +57,22 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst return ret; } -static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight) +static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight, uint32_t user_level) { - dmub_abm_init(abm, backlight); + dmub_abm_init(abm, backlight, user_level); } static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm) { + dc_allow_idle_optimizations(abm->ctx->dc, false); + return dmub_abm_get_current_backlight(abm); } static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm) { + dc_allow_idle_optimizations(abm->ctx->dc, false); + return dmub_abm_get_target_backlight(abm); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 4cff36351f..f9d6a18116 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -79,7 +79,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } -void dmub_abm_init(struct abm *abm, uint32_t backlight) +void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level) { struct dce_abm *dce_abm = TO_DMUB_ABM(abm); @@ -106,7 +106,7 @@ void dmub_abm_init(struct abm *abm, uint32_t backlight) BL1_PWM_TARGET_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_USER_LEVEL, - BL1_PWM_USER_LEVEL, backlight); + BL1_PWM_USER_LEVEL, user_level); REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h index 07ea6c8d41..761685e5b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h @@ -30,7 +30,7 @@ struct abm_save_restore; -void dmub_abm_init(struct abm *abm, uint32_t backlight); +void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level); bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask); unsigned int dmub_abm_get_current_backlight(struct abm *abm); unsigned int 
dmub_abm_get_target_backlight(struct abm *abm); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 28149e53c2..38e4797e94 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -258,13 +258,97 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, *residency = 0; } +/** + * Set REPLAY power optimization flags and coasting vtotal. + */ +static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub, + unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY; + cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type = + DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL; + cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal); + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt; + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst; + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal; + + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +/** + * send Replay general cmd to DMUB. + */ +static void dmub_replay_send_cmd(struct dmub_replay *dmub, + enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element) +{ + union dmub_rb_cmd cmd; + struct dc_context *ctx = NULL; + + if (dmub == NULL || cmd_element == NULL) + return; + + ctx = dmub->ctx; + if (ctx != NULL) { + + if (msg != Replay_Msg_Not_Support) { + memset(&cmd, 0, sizeof(cmd)); + //Header + cmd.replay_set_timing_sync.header.type = DMUB_CMD__REPLAY; + } else + return; + } else + return; + + switch (msg) { + case Replay_Set_Timing_Sync_Supported: + //Header + cmd.replay_set_timing_sync.header.sub_type = + DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED; + cmd.replay_set_timing_sync.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_timing_sync); + //Cmd Body + cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst = + cmd_element->sync_data.panel_inst; + cmd.replay_set_timing_sync.replay_set_timing_sync_data.timing_sync_supported = + cmd_element->sync_data.timing_sync_supported; + break; + case Replay_Set_Residency_Frameupdate_Timer: + //Header + cmd.replay_set_frameupdate_timer.header.sub_type = + DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER; + cmd.replay_set_frameupdate_timer.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer); + //Cmd Body + cmd.replay_set_frameupdate_timer.data.panel_inst = + cmd_element->panel_inst; + cmd.replay_set_frameupdate_timer.data.enable = + cmd_element->timer_data.enable; + cmd.replay_set_frameupdate_timer.data.frameupdate_count = + cmd_element->timer_data.frameupdate_count; + break; + case Replay_Msg_Not_Support: + default: + return; + break; + } + + dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + static const struct dmub_replay_funcs replay_funcs = { - .replay_copy_settings = dmub_replay_copy_settings, - .replay_enable = dmub_replay_enable, - .replay_get_state = dmub_replay_get_state, - .replay_set_power_opt = dmub_replay_set_power_opt, - .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, - 
.replay_residency = dmub_replay_residency, + .replay_copy_settings = dmub_replay_copy_settings, + .replay_enable = dmub_replay_enable, + .replay_get_state = dmub_replay_get_state, + .replay_set_power_opt = dmub_replay_set_power_opt, + .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, + .replay_residency = dmub_replay_residency, + .replay_set_power_opt_and_coasting_vtotal = dmub_replay_set_power_opt_and_coasting_vtotal, + .replay_send_cmd = dmub_replay_send_cmd, }; /* diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index e8385bbf51..3613aff994 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -45,10 +45,14 @@ struct dmub_replay_funcs { struct replay_context *replay_context, uint8_t panel_inst); void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, uint8_t panel_inst); + void (*replay_send_cmd)(struct dmub_replay *dmub, + enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element); void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal, uint8_t panel_inst); void (*replay_residency)(struct dmub_replay *dmub, uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm); + void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub, + unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal); }; struct dmub_replay *dmub_replay_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile deleted file mode 100644 index 0d2f6bbf75..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright 2017 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# -# Makefile for the 'controller' sub-component of DAL. -# It provides the control and status of HW CRTC block. 
- -CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init) - -DCE100 = dce100_resource.o - -AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCE100) - - -############################################################################### -# DCE 10x -############################################################################### -ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0 -TG_DCE100 = dce100_resource.o - -AMD_DAL_TG_DCE100 = $(addprefix \ - $(AMDDALPATH)/dc/dce100/,$(TG_DCE100)) - -AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100) -endif - diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile index 695a50ed5a..c307f040e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile @@ -23,11 +23,11 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init DCE110 = dce110_timing_generator.o \ -dce110_compressor.o dce110_resource.o \ -dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \ +dce110_compressor.o dce110_opp_regamma_v.o \ +dce110_opp_csc_v.o dce110_timing_generator_v.o \ dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110)) diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile index e846ef58ca..6838667977 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile @@ -23,10 +23,9 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init -DCE112 = dce112_compressor.o \ -dce112_resource.o +DCE112 = dce112_compressor.o AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112)) diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile index 097cf407a1..8f508e6627 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile @@ -24,9 +24,9 @@ # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init -DCE120 = dce120_resource.o dce120_timing_generator.o \ +DCE120 = dce120_timing_generator.o AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120)) diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile index fee331accc..eede83ad91 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. 
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \ dce60_resource.o diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile index 93dd68c312..fba189d266 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile @@ -23,10 +23,9 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init -DCE80 = dce80_timing_generator.o \ - dce80_resource.o +DCE80 = dce80_timing_generator.o AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 2d2007c3e2..ae6a131be7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -22,9 +22,9 @@ # # Makefile for DCN. -DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o \ +DCN10 = dcn10_ipp.o \ dcn10_hw_sequencer_debug.o \ - dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ + dcn10_dpp.o dcn10_opp.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 92fdab731f..9033b39e0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -32,7 +32,7 @@ #include "dce/dce_hwseq.h" #include "abm.h" #include "dmcu.h" -#include "dcn10_optc.h" +#include "dcn10/dcn10_optc.h" #include "dcn10/dcn10_dpp.h" #include "dcn10/dcn10_mpc.h" #include "timing_generator.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 0dec576792..86bfed5dea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -377,6 +377,7 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_set_disp_pattern_generator = NULL, .opp_program_dpg_dimensions = NULL, .dpg_is_blanked = NULL, + .dpg_is_pending = NULL, .opp_destroy = opp1_destroy }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index d7dc9696a8..3dae3943b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -2,13 +2,11 @@ # # Makefile for DCN. 
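The CFLAGS hunks in these Makefiles swap $(call cc-disable-warning, override-init) for a literal -Wno-override-init. The warning being silenced fires when a designated initializer re-initializes a field that an earlier initializer already set, which the DCE/DCN register tables do on purpose through macro expansion. A contrived illustration of the pattern (the struct and macros here are hypothetical, not from the patch):

struct regs { int a; int b; };

#define BASE_REGS	.a = 1, .b = 2	/* common register defaults */
#define ASIC_OVERRIDES	.b = 3		/* ASIC-specific override */

static const struct regs r = {
	BASE_REGS,		/* sets .a = 1, .b = 2 */
	ASIC_OVERRIDES,		/* re-initializes .b = 3: -Woverride-init warns */
};

The behavior is well defined (the last initializer wins), so the files disable the warning rather than restructure the tables.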
-DCN20 = dcn20_resource.o dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ - dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \ +DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ + dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o -DCN20 += dcn20_dsc.o - AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN20) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index ab6d09c6fe..ef5c22f415 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -291,7 +291,43 @@ type SYMCLKB_FE_SRC_SEL;\ type SYMCLKC_FE_SRC_SEL;\ type SYMCLKD_FE_SRC_SEL;\ - type SYMCLKE_FE_SRC_SEL; + type SYMCLKE_FE_SRC_SEL;\ + type DTBCLK_P0_GATE_DISABLE;\ + type DTBCLK_P1_GATE_DISABLE;\ + type DTBCLK_P2_GATE_DISABLE;\ + type DTBCLK_P3_GATE_DISABLE;\ + type DSCCLK0_ROOT_GATE_DISABLE;\ + type DSCCLK1_ROOT_GATE_DISABLE;\ + type DSCCLK2_ROOT_GATE_DISABLE;\ + type DSCCLK3_ROOT_GATE_DISABLE;\ + type SYMCLKA_FE_ROOT_GATE_DISABLE;\ + type SYMCLKB_FE_ROOT_GATE_DISABLE;\ + type SYMCLKC_FE_ROOT_GATE_DISABLE;\ + type SYMCLKD_FE_ROOT_GATE_DISABLE;\ + type SYMCLKE_FE_ROOT_GATE_DISABLE;\ + type DPPCLK0_ROOT_GATE_DISABLE;\ + type DPPCLK1_ROOT_GATE_DISABLE;\ + type DPPCLK2_ROOT_GATE_DISABLE;\ + type DPPCLK3_ROOT_GATE_DISABLE;\ + type HDMISTREAMCLK0_ROOT_GATE_DISABLE;\ + type SYMCLKA_ROOT_GATE_DISABLE;\ + type SYMCLKB_ROOT_GATE_DISABLE;\ + type SYMCLKC_ROOT_GATE_DISABLE;\ + type SYMCLKD_ROOT_GATE_DISABLE;\ + type SYMCLKE_ROOT_GATE_DISABLE;\ + type PHYA_REFCLK_ROOT_GATE_DISABLE;\ + type PHYB_REFCLK_ROOT_GATE_DISABLE;\ + type PHYC_REFCLK_ROOT_GATE_DISABLE;\ + type PHYD_REFCLK_ROOT_GATE_DISABLE;\ + type PHYE_REFCLK_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK0_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK1_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK2_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK3_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK0_GATE_DISABLE;\ + type DPSTREAMCLK1_GATE_DISABLE;\ + type DPSTREAMCLK2_GATE_DISABLE;\ + type DPSTREAMCLK3_GATE_DISABLE;\ struct dccg_shift { DCCG_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 0784d01986..fbf1b6370e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp) (double_buffer_pending == 0); } +bool opp2_dpg_is_pending(struct output_pixel_processor *opp) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + uint32_t double_buffer_pending; + uint32_t dpg_en; + + REG_GET(DPG_CONTROL, DPG_EN, &dpg_en); + + REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); + + return (dpg_en == 1 && double_buffer_pending == 1); +} + void opp2_program_left_edge_extra_pixel ( struct output_pixel_processor *opp, bool count) @@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = { .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index 3ab221bdd2..8f186abd55 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions( bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); +bool opp2_dpg_is_pending(struct output_pixel_processor *opp); + void opp2_dpg_set_blank_color( struct output_pixel_processor *opp, const struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile index 3a41a97b07..2b0b4f32e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile @@ -1,9 +1,8 @@ # SPDX-License-Identifier: MIT # # Makefile for DCN. -DCN201 = dcn201_init.o dcn201_resource.o \ - dcn201_hubbub.o\ - dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \ +DCN201 = dcn201_hubbub.o\ + dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \ dcn201_dccg.o dcn201_link_encoder.o AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c index 8e77db46a4..6a71ba3dfc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c @@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = { .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index ce1be0afae..ca92f5c8e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -2,7 +2,7 @@ # # Makefile for DCN21. 
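The dpg_is_pending hook wired up above (opp2_dpg_is_pending for dcn20/dcn201, left NULL for dcn10) reports that the pattern generator is enabled while a double-buffered update has not yet latched. A hypothetical caller sketch, assuming a kernel-style udelay() poll loop; nothing in the patch prescribes this exact use:

static bool dpg_update_latched_sketch(struct output_pixel_processor *opp,
		int max_polls)
{
	int i;

	/* dcn10 leaves .dpg_is_pending NULL, so callers must check the hook. */
	if (!opp->funcs->dpg_is_pending)
		return true;

	for (i = 0; i < max_polls; i++) {
		/* pending == DPG_EN set and DPG_DOUBLE_BUFFER_PENDING still set */
		if (!opp->funcs->dpg_is_pending(opp))
			return true;
		udelay(100);	/* give the OTG a chance to reach vupdate */
	}
	return false;		/* update never latched within the budget */
}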
-DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ +DCN21 = dcn21_hubp.o dcn21_hubbub.o \ dcn21_link_encoder.o dcn21_dccg.o AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index af4d2065d2..b5b2aa3b37 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -23,12 +23,9 @@ # # -DCN30 := \ - dcn30_init.o \ - dcn30_hubbub.o \ +DCN30 := dcn30_hubbub.o \ dcn30_hubp.o \ dcn30_dpp.o \ - dcn30_optc.o \ dcn30_dccg.o \ dcn30_mpc.o dcn30_vpg.o \ dcn30_afmt.o \ @@ -38,7 +35,6 @@ DCN30 := \ dcn30_dwb_cm.o \ dcn30_cm_common.o \ dcn30_mmhubbub.o \ - dcn30_resource.o \ dcn30_dio_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index e43f77c11c..5f97a868ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -56,16 +56,13 @@ static void dpp3_enable_cm_block( static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) { - enum dc_lut_mode mode; + enum dc_lut_mode mode = LUT_BYPASS; uint32_t state_mode; uint32_t lut_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); - if (state_mode == 0) - mode = LUT_BYPASS; - if (state_mode == 2) {//Programmable RAM LUT REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); if (lut_mode == 0) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c index 0d98918bf0..1b9d9495f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c @@ -130,6 +130,28 @@ bool dwb3_disable(struct dwbc *dwbc) return true; } +void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + unsigned int pre_locked; + + REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked); + + /* Lock DWB registers */ + if (pre_locked == 0) + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1); + + /* Disable FC */ + REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable); + + /* Unlock DWB registers */ + if (pre_locked == 0) + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0); + + DC_LOG_DWB("%s dwb3_fc_disabled at inst = %d", __func__, dwbc->inst); +} + + bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params) { struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); @@ -226,6 +248,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = { .disable = dwb3_disable, .update = dwb3_update, .is_enabled = dwb3_is_enabled, + .set_fc_enable = dwb3_set_fc_enable, .set_stereo = dwb3_set_stereo, .set_new_content = dwb3_set_new_content, .dwb_program_output_csc = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h index a5d1b81e76..332634b76a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h @@ -877,6 +877,8 @@ bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params); bool dwb3_is_enabled(struct dwbc *dwbc); +void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable); + void dwb3_set_stereo(struct dwbc *dwbc, struct dwb_stereo_params *stereo_params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c index 701c7d8bc0..03a50c32fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c @@ -243,6 +243,9 @@ static bool dwb3_program_ogam_lut( return false; } + if (params->hw_points_num == 0) + return false; + REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2); current_mode = dwb3_get_ogam_current(dwbc30); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 30fbc5e06d..d241f665e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -10,9 +10,8 @@ # # Makefile for dcn30. -DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ - dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o \ - dcn301_optc.o +DCN301 = dcn301_dccg.o \ + dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile deleted file mode 100644 index 95b66baf39..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# -# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved -# -# Authors: AMD -# -# Makefile for dcn302. - -DCN3_02 = dcn302_init.o dcn302_resource.o - -AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02) diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile index d7b3ad780e..a954e316ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile @@ -6,7 +6,7 @@ # # Makefile for dcn303. -DCN3_03 = dcn303_init.o dcn303_resource.o +DCN3_03 = dcn303_init.o AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index 96e45c9efb..5d93ac16c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -10,8 +10,8 @@ # # Makefile for dcn31. -DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \ - dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ +DCN31 = dcn31_hubbub.o dcn31_hubp.o \ + dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \ dcn31_afmt.o dcn31_vpg.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile index 72456debb9..b134ab05aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile @@ -10,8 +10,7 @@ # # Makefile for dcn314. -DCN314 = dcn314_resource.o dcn314_init.o \ - dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o +DCN314 = dcn314_dio_stream_encoder.o dcn314_dccg.o AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile deleted file mode 100644 index 59381d2480..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright © 2021 Advanced Micro Devices, Inc. 
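dwb3_set_fc_enable above shows the DWB locking convention: sample DWB_UPDATE_LOCK first, take the lock only if the caller does not already hold it, program the double-buffered field, and release only a lock you took yourself. Reduced to its skeleton (same registers as the hunk, assuming the file's REG_* macro context; the helper name is made up):

static void dwb_locked_update_sketch(struct dcn30_dwbc *dwbc30, uint32_t fc_en)
{
	unsigned int pre_locked;

	REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked);

	if (pre_locked == 0)	/* don't re-lock if the caller holds it */
		REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1);

	REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, fc_en);

	if (pre_locked == 0)	/* release only the lock we took */
		REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0);
}

The hw_points_num == 0 guard added to dwb3_program_ogam_lut in the same hunk is the companion fix: bail out before touching DWB_OGAM_CONTROL when there is no LUT data to program.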
-# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# Authors: AMD -# -# Makefile for dcn315. - -DCN315 = dcn315_resource.o - -AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN315) diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile deleted file mode 100644 index 819d44a943..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright 2021 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# Authors: AMD -# -# Makefile for dcn316. - -DCN316 = dcn316_resource.o - -AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN316) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile index 8bb2513072..5314770fff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile @@ -10,10 +10,10 @@ # # Makefile for dcn32. 
-DCN32 = dcn32_resource.o dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \ - dcn32_dccg.o dcn32_optc.o dcn32_mmhubbub.o dcn32_hubp.o dcn32_dpp.o \ - dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_hpo_dp_link_encoder.o \ - dcn32_resource_helpers.o dcn32_mpc.o +DCN32 = dcn32_hubbub.o dcn32_dccg.o \ + dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \ + dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \ + dcn32_hpo_dp_link_encoder.o AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index d761b0df28..8a0460e863 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -34,6 +34,7 @@ #include "dc_bios_types.h" #include "link_enc_cfg.h" +#include "dc_dmub_srv.h" #include "gpio_service_interface.h" #ifndef MIN @@ -61,6 +62,38 @@ #define AUX_REG_WRITE(reg_name, val) \ dm_write_reg(CTX, AUX_REG(reg_name), val) +static uint8_t phy_id_from_transmitter(enum transmitter t) +{ + uint8_t phy_id; + + switch (t) { + case TRANSMITTER_UNIPHY_A: + phy_id = 0; + break; + case TRANSMITTER_UNIPHY_B: + phy_id = 1; + break; + case TRANSMITTER_UNIPHY_C: + phy_id = 2; + break; + case TRANSMITTER_UNIPHY_D: + phy_id = 3; + break; + case TRANSMITTER_UNIPHY_E: + phy_id = 4; + break; + case TRANSMITTER_UNIPHY_F: + phy_id = 5; + break; + case TRANSMITTER_UNIPHY_G: + phy_id = 6; + break; + default: + phy_id = 0; + break; + } + return phy_id; +} void enc32_hw_init(struct link_encoder *enc) { @@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output( } } -static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +static bool query_dp_alt_from_dmub(struct link_encoder *enc, + union dmub_rb_cmd *cmd) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t dp_alt_mode_disable = 0; - bool is_usb_c_alt_mode = false; - if (enc->features.flags.bits.DP_IS_USB_C) { - /* if value == 1 alt mode is disabled, otherwise it is enabled */ - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - is_usb_c_alt_mode = (dp_alt_mode_disable == 0); - } + memset(cmd, 0, sizeof(*cmd)); + cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; + cmd->query_dp_alt.header.sub_type = + DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); + cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + + if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return false; - return is_usb_c_alt_mode; + return true; } -static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + union dmub_rb_cmd cmd; + + if (!query_dp_alt_from_dmub(enc, &cmd)) + return false; + + return (cmd.query_dp_alt.data.is_dp_alt_disable == 0); +} + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { - struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t is_in_usb_c_dp4_mode = 0; + union dmub_rb_cmd cmd; dcn10_link_encoder_get_max_link_cap(enc, link_settings); - /* in usb c dp2 mode, max lane count is 2 */ - if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - if (!is_in_usb_c_dp4_mode) - 
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); - } + if (!query_dp_alt_from_dmub(enc, &cmd)) + return; + if (cmd.query_dp_alt.data.is_usb && + cmd.query_dp_alt.data.is_dp4 == 0) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); } + static const struct link_encoder_funcs dcn32_link_enc_funcs = { .read_state = link_enc2_read_state, .validate_output_with_stream = @@ -203,12 +248,12 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; enc10->base.features = *enc_features; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.transmitter = init_data->transmitter; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h index bbcfce06be..2d5f25290e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h @@ -53,4 +53,9 @@ void dcn32_link_encoder_enable_dp_output( const struct dc_link_settings *link_settings, enum clock_source_id clock_source); +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc); + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings); + #endif /* __DC_LINK_ENCODER__DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c index 3279b61022..e408e859b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c @@ -71,12 +71,13 @@ void mpc32_power_on_blnd_lut( { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0, MPCC_MCM_1DLUT_MEM_PWR_DIS, power_on); + if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) { if (power_on) { REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0); REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5); } else if (!mpc->ctx->dc->debug.disable_mem_low_power) { - ASSERT(false); /* TODO: change to mpc * dpp_base->ctx->dc->optimized_required = true; * dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index 1f89428499..f98def6c8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -24,10 +24,11 @@ */ // header file of functions being implemented -#include "dcn32_resource.h" +#include "dcn32/dcn32_resource.h" #include "dcn20/dcn20_resource.h" #include "dml/dcn32/display_mode_vba_util_32.h" #include "dml/dcn32/dcn32_fpu.h" +#include "dc_state_priv.h" static bool is_dual_plane(enum surface_pixel_format format) { @@ -190,7 +191,7 @@ bool dcn32_subvp_in_use(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) + if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) return true; } return false; @@ -264,18 +265,17 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, 
uint // Do not override if a stream has multiple planes for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count > 1) { + if (context->stream_status[i].plane_count > 1) return; - } - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) { + + if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM) stream_count++; - } } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) { if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) { @@ -290,7 +290,7 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) { if (pipe_segments[i] > 4) pipe_segments[i] = 4; @@ -337,14 +337,14 @@ void dcn32_determine_det_override(struct dc *dc, for (i = 0; i < context->stream_count; i++) { /* Don't count SubVP streams for DET allocation */ - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM) stream_count++; } if (stream_count > 0) { stream_segments = 18 / stream_count; for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM) continue; if (context->stream_status[i].plane_count > 0) @@ -430,71 +430,6 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, dcn32_determine_det_override(dc, context, pipes); } -/** - * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases - * - * This function saves the MALL (SubVP) case for fast validation cases. For fast validation, - * there are situations where a shallow copy of the dc->current_state is created for the - * validation. In this case we want to save and restore the mall config because we always - * teardown subvp at the beginning of validation (and don't attempt to add it back if it's - * fast validation). If we don't restore the subvp config in cases of fast validation + - * shallow copy of the dc->current_state, the dc->current_state will have a partially - * removed subvp state when we did not intend to remove it. - * - * NOTE: This function ONLY works if the streams are not moved to a different pipe in the - * validation. We don't expect this to happen in fast_validation=1 cases. 
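The dcn32 link-encoder hunks above replace direct RDPCSPIPE_PHY_CNTL6 reads with a DMUB round trip: map the transmitter to a PHY id, fill in a query_dp_alt command, execute it with DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY, and read the firmware's answer back out of the same command buffer. A condensed sketch of that flow, assuming the static phy_id_from_transmitter() helper from the hunk is in scope; the function name is made up:

static bool usb_c_is_dp4_sketch(struct link_encoder *enc)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
	cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
	cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.query_dp_alt.data);
	cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);

	/* WAIT_WITH_REPLY: the firmware writes its answer back into cmd. */
	if (!dc_wake_and_execute_dmub_cmd(enc->ctx, &cmd,
					  DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		return false;

	/* USB-C alt mode with all four lanes available for DP */
	return cmd.query_dp_alt.data.is_usb && cmd.query_dp_alt.data.is_dp4;
}

dcn32_link_encoder_get_max_link_cap uses exactly this reply to clamp lane_count to two when is_usb is set but is_dp4 is not.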
- * - * @dc: Current DC state - * @context: New DC state to be programmed - * @temp_config: struct used to cache the existing MALL state - * - * Return: void - */ -void dcn32_save_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config) -{ - uint32_t i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (pipe->stream) - temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config; - - if (pipe->plane_state) - temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom; - } -} - -/** - * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases - * - * Restore the MALL state based on the previously saved state from dcn32_save_mall_state - * - * @dc: Current DC state - * @context: New DC state to be programmed, restore MALL state into here - * @temp_config: struct that has the cached MALL state - * - * Return: void - */ -void dcn32_restore_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config) -{ - uint32_t i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (pipe->stream) - pipe->stream->mall_stream_config = temp_config->mall_stream_config[i]; - - if (pipe->plane_state) - pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i]; - } -} - #define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW) /* * Scaling factor for v_blank stretch calculations considering timing in @@ -589,13 +524,14 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream) * * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL */ -struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context) +struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context) { int refresh_rate = 0; const int minimum_refreshrate_supported = 120; struct dc_stream_state *fpo_candidate_stream = NULL; bool is_fpo_vactive = false; uint32_t fpo_vactive_margin_us = 0; + struct dc_stream_status *fpo_stream_status = NULL; if (context == NULL) return NULL; @@ -618,16 +554,28 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre DC_FP_START(); dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream); DC_FP_END(); - + if (fpo_candidate_stream) + fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream); DC_FP_START(); is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us); DC_FP_END(); if (!is_fpo_vactive || dc->debug.disable_fpo_vactive) return NULL; - } else + } else { fpo_candidate_stream = context->streams[0]; + if (fpo_candidate_stream) + fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream); + } - if (!fpo_candidate_stream) + /* In DCN32/321, FPO uses per-pipe P-State force. + * If there's no planes, HUBP is power gated and + * therefore programming UCLK_PSTATE_FORCE does + * nothing (P-State will always be asserted naturally + * on a pipe that has HUBP power gated. Therefore we + * only want to enable FPO if the FPO pipe has both + * a stream and a plane. 
+ */ + if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0) return NULL; if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams) @@ -666,6 +614,30 @@ bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int widt } /** + * disallow_subvp_in_active_plus_blank() - Function to determine disallowed subvp + drr/vblank configs + * + * @pipe: subvp pipe to be used for the subvp + drr/vblank config + * + * Since subvp is being enabled on more configs (such as 1080p60), we want + * to explicitly block any configs that we don't want to enable. We do not + * want to enable any 1080p60 (SubVP) + drr / vblank configs since these + * are already covered by FPO. + * + * Return: True if disallowed, false otherwise + */ +static bool disallow_subvp_in_active_plus_blank(struct pipe_ctx *pipe) +{ + bool disallow = false; + + if (resource_is_pipe_type(pipe, OPP_HEAD) && + resource_is_pipe_type(pipe, DPP_PIPE)) { + if (pipe->stream->timing.v_addressable == 1080 && pipe->stream->timing.h_addressable == 1920) + disallow = true; + } + return disallow; +} + +/** * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible * * @dc: Current DC state @@ -688,21 +660,24 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) bool drr_pipe_found = false; bool drr_psr_capable = false; uint64_t refresh_rate = 0; + bool subvp_disallow = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (resource_is_pipe_type(pipe, OPP_HEAD) && resource_is_pipe_type(pipe, DPP_PIPE)) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_mall_type == SUBVP_MAIN) { subvp_count++; + subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); } - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) { non_subvp_pipes++; drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe)); if (pipe->stream->ignore_msa_timing_param && @@ -713,7 +688,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) } } - if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && + if (subvp_count == 1 && !subvp_disallow && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && ((uint32_t)refresh_rate < 120)) result = true; @@ -746,21 +721,24 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int struct vba_vars_st *vba = &context->bw_ctx.dml.vba; bool vblank_psr_capable = false; uint64_t refresh_rate = 0; + bool subvp_disallow = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (resource_is_pipe_type(pipe, OPP_HEAD) && resource_is_pipe_type(pipe, DPP_PIPE)) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_mall_type == SUBVP_MAIN) { subvp_count++; + subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total *
pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); } - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) { non_subvp_pipes++; vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe)); if (pipe->stream->ignore_msa_timing_param && @@ -772,7 +750,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int } if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable && - ((uint32_t)refresh_rate < 120) && + ((uint32_t)refresh_rate < 120) && !subvp_disallow && vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) result = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile index 0a199c83bb..c195c47f58 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile @@ -10,7 +10,7 @@ # # Makefile for dcn321. -DCN321 = dcn321_resource.o dcn321_dio_link_encoder.o +DCN321 = dcn321_dio_link_encoder.o AMD_DAL_DCN321 = $(addprefix $(AMDDALPATH)/dc/dcn321/,$(DCN321)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile index 20d0eef1a1..0e317e0c36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile @@ -10,9 +10,9 @@ # # Makefile for DCN35. -DCN35 = dcn35_resource.o dcn35_init.o dcn35_dio_stream_encoder.o \ - dcn35_dio_link_encoder.o dcn35_dccg.o dcn35_optc.o \ - dcn35_dsc.o dcn35_hubp.o dcn35_hubbub.o \ +DCN35 = dcn35_dio_stream_encoder.o \ + dcn35_dio_link_encoder.o dcn35_dccg.o \ + dcn35_hubp.o dcn35_hubbub.o \ dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c index 479f3683c0..f1ba7bb792 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c @@ -256,6 +256,21 @@ static void dccg35_set_dtbclk_dto( if (params->ref_dtbclk_khz && req_dtbclk_khz) { uint32_t modulo, phase; + switch (params->otg_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 1); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 1); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 1); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 1); + break; + } + // phase / modulo = dtbclk / dtbclk ref modulo = params->ref_dtbclk_khz * 1000; phase = req_dtbclk_khz * 1000; @@ -280,6 +295,21 @@ static void dccg35_set_dtbclk_dto( REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], PIPE_DTO_SRC_SEL[params->otg_inst], 2); } else { + switch (params->otg_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 0); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 0); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 0); + break; + } + REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst], DTBCLK_DTO_ENABLE[params->otg_inst], 0, PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 
0 : 1); @@ -476,6 +506,64 @@ static void dccg35_dpp_root_clock_control( dccg->dpp_clock_gated[dpp_inst] = !clock_on; } +static void dccg35_disable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* set refclk as the source for symclk32_se */ + switch (hpo_se_inst) { + case 0: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE0_SRC_SEL, 0, + SYMCLK32_SE0_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); + } + break; + case 1: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE1_SRC_SEL, 0, + SYMCLK32_SE1_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); + } + break; + case 2: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE2_SRC_SEL, 0, + SYMCLK32_SE2_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); + } + break; + case 3: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE3_SRC_SEL, 0, + SYMCLK32_SE3_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); + } + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + void dccg35_init(struct dccg *dccg) { int otg_inst; @@ -484,7 +572,7 @@ void dccg35_init(struct dccg *dccg) * will cause DCN to hang. 
*/ for (otg_inst = 0; otg_inst < 4; otg_inst++) - dccg31_disable_symclk32_se(dccg, otg_inst); + dccg35_disable_symclk32_se(dccg, otg_inst); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) for (otg_inst = 0; otg_inst < 2; otg_inst++) @@ -758,7 +846,7 @@ static const struct dccg_funcs dccg35_funcs = { .dccg_init = dccg35_init, .set_dpstreamclk = dccg35_set_dpstreamclk, .enable_symclk32_se = dccg31_enable_symclk32_se, - .disable_symclk32_se = dccg31_disable_symclk32_se, + .disable_symclk32_se = dccg35_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h index 423feb4c2f..1586a45ca3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h @@ -34,6 +34,8 @@ #define DCCG_REG_LIST_DCN35() \ DCCG_REG_LIST_DCN314(),\ SR(DPPCLK_CTRL),\ + SR(DCCG_GATE_DISABLE_CNTL4),\ + SR(DCCG_GATE_DISABLE_CNTL5),\ SR(DCCG_GATE_DISABLE_CNTL6),\ SR(DCCG_GLOBAL_FGCG_REP_CNTL),\ SR(SYMCLKA_CLOCK_ENABLE),\ @@ -174,7 +176,61 @@ DCCG_SF(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, mask_sh),\ - DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh) + DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, 
mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYE_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL, DISPCLK_DCCG_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ struct dccg *dccg35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index 81e349d583..da94e5309f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -184,6 +184,8 @@ void dcn35_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; @@ -238,8 +240,6 @@ void dcn35_link_encoder_construct( } enc10->base.features.flags.bits.HDMI_6GB_EN = 1; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; if (bp_funcs->get_connector_speed_cap_info) result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c index d19db8e9b8..53bd0ae4ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c @@ -342,13 +342,6 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on) pg_cntl->pg_res_enable[PG_DCIO] = power_on; } -void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on) -{ - struct 
dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl); - - REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, power_on ? 1 : 0); -} - static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl) { struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl); @@ -518,8 +511,7 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = { .mpcc_pg_control = pg_cntl35_mpcc_pg_control, .opp_pg_control = pg_cntl35_opp_pg_control, .optc_pg_control = pg_cntl35_optc_pg_control, - .dwb_pg_control = pg_cntl35_dwb_pg_control, - .set_force_poweron_domain22 = pg_cntl35_set_force_poweron_domain22 + .dwb_pg_control = pg_cntl35_dwb_pg_control }; struct pg_cntl *pg_cntl35_create( diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h index 069dae08e2..3de240884d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h @@ -183,7 +183,6 @@ void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on); void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl); -void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on); struct pg_cntl *pg_cntl35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 7ce9a5b6c3..6d7a15dcf8 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -103,10 +103,16 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( /* * Sends ALLOCATE_PAYLOAD message. */ -bool dm_helpers_dp_mst_send_payload_allocation( +void dm_helpers_dp_mst_send_payload_allocation( struct dc_context *ctx, - const struct dc_stream_state *stream, - bool enable); + const struct dc_stream_state *stream); + +/* + * Update mst manager relevant variables + */ +void dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + struct dc_context *ctx, + const struct dc_stream_state *stream); bool dm_helpers_dp_mst_start_top_mgr( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index 4440d08743..bd7ba0a251 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -247,6 +247,7 @@ struct pp_smu_funcs_nv { #define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4 #define PP_SMU_NUM_DCLK_DPM_LEVELS 8 #define PP_SMU_NUM_VCLK_DPM_LEVELS 8 +#define PP_SMU_NUM_VPECLK_DPM_LEVELS 8 struct dpm_clock { uint32_t Freq; // In MHz @@ -262,6 +263,7 @@ struct dpm_clocks { struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS]; struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS]; struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS]; + struct dpm_clock VPEClocks[PP_SMU_NUM_VPECLK_DPM_LEVELS]; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index 50b0434354..0c4a8fe8e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -30,7 +30,7 @@ #include "dcn_calc_auto.h" #include "dal_asic_id.h" #include "resource.h" -#include "dcn10/dcn10_resource.h" +#include "resource/dcn10/dcn10_resource.h" #include "dcn10/dcn10_hubbub.h" #include "dml/dml1_display_rq_dlg_calc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 
d2271e308f..38ab9ad60e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -33,6 +33,7 @@ #include "link.h" #include "dcn20_fpu.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc->ctx->logger @@ -1182,7 +1183,7 @@ void dcn20_calculate_dlg_params(struct dc *dc, pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[i].unbounded_req = false; @@ -1532,7 +1533,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc, */ if (res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || - res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM)) + dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM)) pipes[pipe_cnt].pipe.src.num_cursors = 0; else pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 3686f1e7de..63c48c29ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3542,7 +3542,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l { struct vba_vars_st *v = &mode_lib->vba; int MinPrefetchMode, MaxPrefetchMode; - int i; + int i, start_state; unsigned int j, k, m; bool EnoughWritebackUnits = true; bool WritebackModeSupport = true; @@ -3553,6 +3553,11 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ + if (mode_lib->validate_max_state) + start_state = v->soc.num_states - 1; + else + start_state = 0; + CalculateMinAndMaxPrefetchMode( mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &MinPrefetchMode, &MaxPrefetchMode); @@ -3851,7 +3856,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->SingleDPPViewportSizeSupportPerPlane, &v->ViewportSizeSupport[0][0]); - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed); v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed); @@ -4007,7 +4012,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*Total Available Pipes Support Check*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) { v->TotalAvailablePipesSupport[i][j] = true; @@ -4046,7 +4051,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) 
{ v->RequiresDSC[i][k] = false; v->RequiresFEC[i][k] = false; @@ -4174,7 +4179,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { v->DIOSupport[i] = true; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi) @@ -4185,7 +4190,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; ++i) { + for (i = start_state; i < v->soc.num_states; ++i) { v->ODMCombine4To1SupportCheckOK[i] = true; for (k = 0; k < v->NumberOfActivePlanes; ++k) { if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 @@ -4197,7 +4202,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { v->NotEnoughDSCUnits[i] = false; v->TotalDSCUnitsRequired = 0.0; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { @@ -4217,7 +4222,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } /*DSC Delay per state*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { if (v->OutputBppPerState[i][k] == BPP_INVALID) { v->BPP = 0.0; @@ -4333,7 +4338,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k]; } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k]; @@ -5075,7 +5080,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*PTE Buffer Size Check*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { v->PTEBufferSizeNotExceeded[i][j] = true; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { @@ -5136,7 +5141,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } /*Mode Support, Voltage State and SOC Configuration*/ - for (i = v->soc.num_states - 1; i >= 0; i--) { + for (i = v->soc.num_states - 1; i >= start_state; i--) { for (j = 0; j < 2; j++) { if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1 && v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1 @@ -5158,7 +5163,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } { unsigned int MaximumMPCCombine = 0; - for (i = v->soc.num_states; i >= 0; i--) { + for (i = v->soc.num_states; i >= start_state; i--) { if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) { v->VoltageLevel = i; v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 
b315ca6f1c..ba76dd4a2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -32,6 +32,7 @@ #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" #include "dcn30/dcn30_resource.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) @@ -45,6 +46,14 @@ static const struct subvp_high_refresh_list subvp_high_refresh_list = { {.width = 1920, .height = 1080, }}, }; +static const struct subvp_active_margin_list subvp_active_margin_list = { + .min_refresh = 55, + .max_refresh = 65, + .res = { + {.width = 2560, .height = 1440, }, + {.width = 1920, .height = 1080, }}, +}; + struct _vcs_dpi_ip_params_st dcn3_2_ip = { .gpuvm_enable = 0, .gpuvm_max_page_table_levels = 4, @@ -333,7 +342,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc, if (!pipe->stream) continue; - if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vupdate_offset = @@ -616,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && - pipe->stream->mall_stream_config.type == SUBVP_NONE && + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && !pipe->plane_state->address.tmz_surface && (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || @@ -674,7 +683,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context // Find the minimum pipe split count for non SubVP pipes if (resource_is_pipe_type(pipe, OPP_HEAD) && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) { split_cnt = 0; while (pipe) { split_cnt++; @@ -727,8 +736,8 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context) * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. 
*/ if (pipe->stream && pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - phantom = pipe->stream->mall_stream_config.paired_stream; + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + phantom = dc_state_get_paired_subvp_stream(context, pipe->stream); microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + phantom->timing.v_addressable; @@ -796,6 +805,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) int16_t stretched_drr_us = 0; int16_t drr_stretched_vblank_us = 0; int16_t max_vblank_mallregion = 0; + struct dc_stream_state *phantom_stream; + bool subvp_found = false; + bool drr_found = false; // Find SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -808,8 +820,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + subvp_found = true; break; + } } // Find the DRR pipe @@ -817,32 +831,37 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) drr_pipe = &context->res_ctx.pipe_ctx[i]; // We check for master pipe only - if (!resource_is_pipe_type(pipe, OTG_MASTER) || - !resource_is_pipe_type(pipe, DPP_PIPE)) + if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) || + !resource_is_pipe_type(drr_pipe, DPP_PIPE)) continue; - if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && - (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) + if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && + (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) { + drr_found = true; break; + } } - main_timing = &pipe->stream->timing; - phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; - drr_timing = &drr_pipe->stream->timing; - prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / - (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + - dc->caps.subvp_prefetch_end_to_mall_start_us; - subvp_active_us = main_timing->v_addressable * main_timing->h_total / - (double)(main_timing->pix_clk_100hz * 100) * 1000000; - drr_frame_us = drr_timing->v_total * drr_timing->h_total / - (double)(drr_timing->pix_clk_100hz * 100) * 1000000; - // P-State allow width and FW delays already included phantom_timing->v_addressable - mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total / - (double)(phantom_timing->pix_clk_100hz * 100) * 1000000; - stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; - drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total / - (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us); - max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? 
drr_stretched_vblank_us : mall_region_us; + if (subvp_found && drr_found) { + phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream); + main_timing = &pipe->stream->timing; + phantom_timing = &phantom_stream->timing; + drr_timing = &drr_pipe->stream->timing; + prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / + (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + + dc->caps.subvp_prefetch_end_to_mall_start_us; + subvp_active_us = main_timing->v_addressable * main_timing->h_total / + (double)(main_timing->pix_clk_100hz * 100) * 1000000; + drr_frame_us = drr_timing->v_total * drr_timing->h_total / + (double)(drr_timing->pix_clk_100hz * 100) * 1000000; + // P-State allow width and FW delays already included phantom_timing->v_addressable + mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total / + (double)(phantom_timing->pix_clk_100hz * 100) * 1000000; + stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; + drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total / + (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us); + max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us; + } /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis @@ -887,6 +906,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; struct dc_crtc_timing *vblank_timing = NULL; + struct dc_stream_state *phantom_stream; + enum mall_stream_type pipe_mall_type; /* For SubVP + VBLANK/DRR cases, we assume there can only be * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK @@ -896,6 +917,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) @@ -903,18 +925,19 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) !resource_is_pipe_type(pipe, DPP_PIPE)) continue; - if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (!found && pipe_mall_type == SUBVP_NONE) { // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). 
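The subvp_drr_schedulable() rework above only runs the microschedule arithmetic once both a SubVP main pipe and a DRR pipe have actually been found; before the guard, the timing pointers were taken even when the scans fell through without a match. A self-contained sketch of the stretched-frame arithmetic, with a hypothetical plain-C timing struct standing in for dc_crtc_timing and a placeholder SUBVP_DRR_MARGIN_US:

#include <stdio.h>

/* stand-in for dc_crtc_timing; pix_clk_100hz is in units of 100 Hz */
struct timing {
	int v_total, v_addressable, h_total;
	long pix_clk_100hz;
};

#define SUBVP_DRR_MARGIN_US 100 /* placeholder, not the driver's value */

/* one full frame: v_total * h_total pixels at the pixel clock, in us */
static double frame_us(const struct timing *t)
{
	return t->v_total * t->h_total /
	       (double)(t->pix_clk_100hz * 100) * 1000000;
}

/* MALL region spans the phantom stream's active lines, in us */
static double mall_region_us(const struct timing *phantom)
{
	return phantom->v_addressable * phantom->h_total /
	       (double)(phantom->pix_clk_100hz * 100) * 1000000;
}

int main(void)
{
	struct timing drr = { 1125, 1080, 2200, 1485000 };   /* ~1080p60 */
	struct timing phantom = { 200, 120, 2200, 1485000 }; /* made-up phantom */
	double stretched = frame_us(&drr) + mall_region_us(&phantom) +
			   SUBVP_DRR_MARGIN_US;

	printf("stretched DRR frame: %.1f us\n", stretched);
	return 0;
}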
vblank_index = i; found = true; } - if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) subvp_pipe = pipe; } if (found) { + phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); main_timing = &subvp_pipe->stream->timing; - phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe // Also include the prefetch end to mallstart delay time @@ -969,7 +992,7 @@ static bool subvp_subvp_admissable(struct dc *dc, continue; if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); @@ -1018,23 +1041,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc, for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (pipe_mall_type == SUBVP_MAIN) subvp_count++; - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) non_subvp_pipes++; - } } // Count how many planes that aren't SubVP/phantom are capable of VACTIVE // switching (SubVP + VACTIVE unsupported). In situations where we force // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. 
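subvp_subvp_admissable() above computes the refresh rate as a 64-bit ceiling division split across two div_u64 calls, avoiding both 32-bit overflow and the floating-point division the old code used. A standalone sketch of that computation, with plain division standing in for the kernel's div_u64 and example timing numbers:

#include <stdint.h>
#include <stdio.h>

/* refresh ~= ceil(pix_clk / (v_total * h_total)), evaluated as two
 * truncating divides in the same order the driver uses */
static uint32_t refresh_hz(uint32_t pix_clk_100hz, uint32_t v_total,
			   uint32_t h_total)
{
	uint64_t r = (uint64_t)pix_clk_100hz * 100 +
		     (uint64_t)v_total * h_total - 1;
	r /= v_total;  /* div_u64 in the kernel code */
	r /= h_total;
	return (uint32_t)r;
}

int main(void)
{
	/* 2560x1440@60-ish: 241.5 MHz pixel clock, 2720x1481 total */
	printf("%u Hz\n", refresh_hz(2415000, 1481, 2720));
	return 0;
}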
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { vactive_count++; } pipe_idx++; @@ -1070,7 +1093,7 @@ static void assign_subvp_index(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { pipe_ctx->subvp_index = index++; } else { pipe_ctx->subvp_index = 0; @@ -1414,6 +1437,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, unsigned int dc_pipe_idx = 0; int i = 0; bool found_supported_config = false; + int vlevel_temp = 0; dc_assert_fp_enabled(); @@ -1446,13 +1470,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, */ if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) && !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) && - (*vlevel == context->bw_ctx.dml.soc.num_states || + (*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] && + vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) || vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || dc->debug.force_subvp_mclk_switch)) { dcn32_merge_pipes_for_subvp(dc, context); memset(merge, 0, MAX_PIPES * sizeof(bool)); + vlevel_temp = *vlevel; /* to re-initialize viewport after the pipe merge */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -1521,10 +1547,14 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, } } + if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp]) + found_supported_config = false; + // If SubVP pipe config is unsupported (or cannot be used for UCLK switching) // remove phantom pipes and repopulate dml pipes if (!found_supported_config) { - dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported; *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); @@ -1676,7 +1706,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[i].unbounded_req = false; @@ -1708,7 +1738,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) && context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { /* SS: all active surfaces stored in MALL */ - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, 
&context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) { context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) { @@ -1922,7 +1952,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, return false; // For each full update, remove all existing phantom pipes first - dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate); + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); dc->res_pool->funcs->update_soc_for_wm_a(dc, context); @@ -2729,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk struct _vcs_dpi_voltage_scaling_st entry = {0}; struct clk_limit_table_entry max_clk_data = {0}; - unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; + unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599; static const unsigned int num_dcfclk_stas = 5; unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564}; @@ -3305,25 +3336,24 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) { bool allow = false; uint32_t refresh_rate = 0; + uint32_t min_refresh = subvp_active_margin_list.min_refresh; + uint32_t max_refresh = subvp_active_margin_list.max_refresh; + uint32_t i; - /* Allow subvp on displays that have active margin for 2560x1440@60hz displays * only for now. There must be no scaling as well. * * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs * for p-state switching. */ - if (pipe->stream && pipe->plane_state) { - refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) - / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); - if (pipe->stream->timing.v_addressable == 1440 && - pipe->stream->timing.h_addressable == 2560 && - refresh_rate >= 55 && refresh_rate <= 65 && - pipe->plane_state->src_rect.height == 1440 && - pipe->plane_state->src_rect.width == 2560 && - pipe->plane_state->dst_rect.height == 1440 && - pipe->plane_state->dst_rect.width == 2560) + for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) { + uint32_t width = subvp_active_margin_list.res[i].width; + uint32_t height = subvp_active_margin_list.res[i].height; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + + if (refresh_rate >= min_refresh && refresh_rate <= max_refresh && + dcn32_check_native_scaling_for_res(pipe, width, height)) { allow = true; + break; + } } return allow; } @@ -3442,7 +3472,15 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->stream) + /* In DCN32/321, FPO uses per-pipe P-State force. + * If there are no planes, HUBP is power gated and + * therefore programming UCLK_PSTATE_FORCE does + * nothing (P-State will always be asserted naturally + * on a pipe that has HUBP power gated). Therefore we + * only want to enable FPO if the FPO pipe has both + * a stream and a plane.
+ */ + if (!pipe->stream || !pipe->plane_state) continue; if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index f154a3eb1d..7ea2bd5374 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -164,11 +164,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, - .sr_exit_z8_time_us = 525.0, - .sr_enter_plus_exit_z8_time_us = 715.0, - .fclk_change_latency_us = 20.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, + .sr_exit_z8_time_us = 210.0, + .sr_enter_plus_exit_z8_time_us = 320.0, + .fclk_change_latency_us = 24.0, .usr_retraining_latency_us = 2, .writeback_latency_us = 12.0, @@ -326,6 +326,25 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc, dcn3_5_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000.0; } + + if (dc->bb_overrides.dram_clock_change_latency_ns > 0) + dcn3_5_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + + if (dc->bb_overrides.sr_exit_time_ns > 0) + dcn3_5_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0) + dcn3_5_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_exit_z8_time_ns > 0) + dcn3_5_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0) + dcn3_5_soc.sr_enter_plus_exit_z8_time_us = + dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; + /*temp till dml2 fully work without dml1*/ dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 1a2b24cc6b..0baf39d64a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -772,18 +772,29 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx, const struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_stream_status *status, unsigned int stream_id, + const struct dc_stream_status *status, + const struct dc_stream_state *stream, int plane_idx) { unsigned int plane_id; unsigned int cfg_idx; + unsigned int mpc_factor; - get_plane_id(ctx, state, status->plane_states[plane_idx], stream_id, plane_idx, &plane_id); + get_plane_id(ctx, state, status->plane_states[plane_idx], + stream->stream_id, plane_idx, &plane_id); cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); - if (ctx->architecture == dml2_architecture_20) - return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; - ASSERT(false); - return 1; + if (ctx->architecture == dml2_architecture_20) { + mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; + } else { + mpc_factor = 1; + ASSERT(false); + } + + /* For stereo timings, we need to pipe split */ + if (dml2_is_stereo_timing(stream)) + mpc_factor = 2; + + return mpc_factor; } static unsigned int get_odm_factor( @@ -820,14 +831,13 @@ static void populate_mpc_factors_for_stream( unsigned int mpc_factors[MAX_PIPES]) { const 
struct dc_stream_status *status = &state->stream_status[stream_idx]; - unsigned int stream_id = state->streams[stream_idx]->stream_id; int i; for (i = 0; i < status->plane_count; i++) if (odm_factor == 1) mpc_factors[i] = get_mpc_factor( ctx, state, disp_cfg, mapping, status, - stream_id, i); + state->streams[stream_idx], i); else mpc_factors[i] = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h index e85866db80..7ca7f2a743 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h @@ -38,5 +38,6 @@ #include "core_types.h" #include "dsc.h" #include "clk_mgr.h" +#include "dc_state_priv.h" #endif //__DML2_DC_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 32f8a43af3..282d70e2b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -51,7 +51,7 @@ unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx, // Find the phantom pipes if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; mblk_width = ctx->config.mall_cfg.mblk_width_pixels; mblk_height = bytes_per_pixel == 4 ? mblk_width = ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels; @@ -253,7 +253,7 @@ static bool assign_subvp_pipe(struct dml2_context *ctx, struct dc_state *context * to combine this with SubVP can cause issues with the scheduling). */ if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_NONE && refresh_rate < 120 && vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { while (pipe) { num_pipes++; @@ -317,7 +317,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st // Find the minimum pipe split count for non SubVP pipes if (pipe->stream && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_NONE) { split_cnt = 0; while (pipe) { split_cnt++; @@ -372,8 +372,8 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. 
*/ if (pipe->stream && pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - phantom = pipe->stream->mall_stream_config.paired_stream; + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + phantom = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream); microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + phantom->timing.v_addressable; @@ -435,6 +435,7 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context struct pipe_ctx *pipe = NULL; struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; + struct dc_stream_state *phantom_stream; int16_t prefetch_us = 0; int16_t mall_region_us = 0; int16_t drr_frame_us = 0; // nominal frame time @@ -453,12 +454,13 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) break; } + phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream); main_timing = &pipe->stream->timing; - phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us; @@ -519,6 +521,8 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; struct dc_crtc_timing *vblank_timing = NULL; + struct dc_stream_state *phantom_stream; + enum mall_stream_type pipe_mall_type; /* For SubVP + VBLANK/DRR cases, we assume there can only be * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK @@ -528,19 +532,20 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * */ for (i = 0; i < ctx->config.dcn_pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe); // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (!found && pipe_mall_type == SUBVP_NONE) { // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). 
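Throughout dml2_mall_phantom.c the DML2 layer now reaches dc state only through the function pointers in dml2_dc_svp_callbacks instead of reading stream->mall_stream_config directly. A reduced sketch of that indirection, with opaque stand-in types; only the callback shape follows the dml2_wrapper.h hunk later in this diff:

/* mirrors the enum the driver uses */
enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct dc_state; /* opaque to this layer */
struct pipe_ctx;

struct svp_callbacks {
	enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state,
						     const struct pipe_ctx *pipe);
};

/* callers classify a pipe without knowing how the state stores the type */
static int is_subvp_main(const struct svp_callbacks *cb,
			 const struct dc_state *state,
			 const struct pipe_ctx *pipe)
{
	return cb->get_pipe_subvp_type(state, pipe) == SUBVP_MAIN;
}

Keeping the accessor behind a callback lets the phantom bookkeeping move into dc_state without DML2 needing to know where it lives.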
vblank_index = i; found = true; } - if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) subvp_pipe = pipe; } // Use ignore_msa_timing_param flag to identify as DRR @@ -548,8 +553,9 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * // SUBVP + DRR case schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing); } else if (found) { + phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, subvp_pipe->stream); main_timing = &subvp_pipe->stream->timing; - phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe // Also include the prefetch end to mallstart delay time @@ -602,19 +608,20 @@ bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) + pipe_mall_type == SUBVP_MAIN) subvp_count++; // Count how many planes that aren't SubVP/phantom are capable of VACTIVE // switching (SubVP + VACTIVE unsupported). In situations where we force // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { vactive_count++; } pipe_idx++; @@ -708,14 +715,10 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup) { struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx]; - struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink); - - phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; - phantom_stream->dpms_off = true; - phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; - phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; - ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; - ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_phantom_stream( + ctx->config.svp_pstate.callbacks.dc, + state, + ref_pipe->stream); /* stream has limited viewport and small timing */ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); @@ -723,7 +726,10 @@ static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, s memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); set_phantom_stream_timing(ctx, state, ref_pipe, phantom_stream, dc_pipe_idx, svp_height, vstartup); - ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); + ctx->config.svp_pstate.callbacks.add_phantom_stream(ctx->config.svp_pstate.callbacks.dc, + state, + phantom_stream, + 
ref_pipe->stream); return phantom_stream; } @@ -740,7 +746,10 @@ static void enable_phantom_plane(struct dml2_context *ctx, if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) { phantom_plane = prev_phantom_plane; } else { - phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc); + phantom_plane = ctx->config.svp_pstate.callbacks.create_phantom_plane( + ctx->config.svp_pstate.callbacks.dc, + state, + curr_pipe->plane_state); } memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); @@ -763,9 +772,7 @@ static void enable_phantom_plane(struct dml2_context *ctx, phantom_plane->clip_rect.y = 0; phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable; - phantom_plane->is_phantom = true; - - ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state); + ctx->config.svp_pstate.callbacks.add_phantom_plane(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state); curr_pipe = curr_pipe->bottom_pipe; prev_phantom_plane = phantom_plane; @@ -790,7 +797,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_ // We determine which phantom pipes were added by comparing with // the phantom stream. if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { pipe->stream->use_dynamic_meta = false; pipe->plane_state->flip_immediate = false; if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) { @@ -800,7 +807,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_ } } -static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context) +static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context) { int i, old_plane_count; struct dc_stream_status *stream_status = NULL; @@ -821,9 +828,11 @@ static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_str for (i = 0; i < old_plane_count; i++) del_planes[i] = stream_status->plane_states[i]; - for (i = 0; i < old_plane_count; i++) - if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context)) + for (i = 0; i < old_plane_count; i++) { + if (!ctx->config.svp_pstate.callbacks.remove_phantom_plane(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context)) return false; + ctx->config.svp_pstate.callbacks.release_phantom_plane(ctx->config.svp_pstate.callbacks.dc, context, del_planes[i]); + } return true; } @@ -832,35 +841,21 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state { int i; bool removed_pipe = false; - struct dc_plane_state *phantom_plane = NULL; struct dc_stream_state *phantom_stream = NULL; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; // build scaling params for phantom pipes - if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; + if (pipe->plane_state && pipe->stream && ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { phantom_stream = pipe->stream; - 
remove_all_planes_for_stream(ctx, pipe->stream, state); - ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream); - - /* Ref count is incremented on allocation and also when added to the context. - * Therefore we must call release for the the phantom plane and stream once - * they are removed from the ctx to finally decrement the refcount to 0 to free. - */ - ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane); - ctx->config.svp_pstate.callbacks.stream_release(phantom_stream); + remove_all_phantom_planes_for_stream(ctx, phantom_stream, state); + ctx->config.svp_pstate.callbacks.remove_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); + ctx->config.svp_pstate.callbacks.release_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); removed_pipe = true; } - // Clear all phantom stream info - if (pipe->stream) { - pipe->stream->mall_stream_config.type = SUBVP_NONE; - pipe->stream->mall_stream_config.paired_stream = NULL; - } - if (pipe->plane_state) { pipe->plane_state->is_phantom = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index b6744ad778..a20f28a5d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -1055,8 +1055,10 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) { - int i = 0, j = 0; + int i = 0, j = 0, k = 0; int disp_cfg_stream_location, disp_cfg_plane_location; + enum mall_stream_type stream_mall_type; + struct pipe_ctx *current_pipe_context; for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id_valid[i] = false; @@ -1076,7 +1078,17 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat dml2_populate_pipe_to_plane_index_mapping(dml2, context); for (i = 0; i < context->stream_count; i++) { + current_pipe_context = NULL; + for (k = 0; k < MAX_PIPES; k++) { + /* find one pipe allocated to this stream for the purpose of getting + info about the link later */ + if (context->streams[i] == context->res_ctx.pipe_ctx[k].stream) { + current_pipe_context = &context->res_ctx.pipe_ctx[k]; + break; + } + } disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg); + stream_mall_type = dc_state_get_stream_subvp_type(context, context->streams[i]); if (disp_cfg_stream_location < 0) disp_cfg_stream_location = dml_dispcfg->num_timings++; @@ -1084,7 +1096,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]); - populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], &context->res_ctx.pipe_ctx[i]); + populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context); switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = 
dml_odm_use_policy_combine_2to1; @@ -1121,10 +1133,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]); populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context); - if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) { + if (stream_mall_type == SUBVP_MAIN) { dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_optimize; - } else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) { + } else if (stream_mall_type == SUBVP_PHANTOM) { dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe; dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_disable; dml2->v20.dml_core_ctx.policy.ImmediateFlipRequirement[disp_cfg_plane_location] = dml_immediate_flip_not_required; @@ -1141,7 +1153,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat if (j >= 1) { populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]); - populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], &context->res_ctx.pipe_ctx[i]); + populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context); switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1; @@ -1153,9 +1165,9 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat break; } - if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) + if (stream_mall_type == SUBVP_MAIN) dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; - else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) + else if (stream_mall_type == SUBVP_PHANTOM) dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe; dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[disp_cfg_plane_location] = context->streams[i]->stream_id; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index d6a6848415..1068b962d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -155,8 +155,12 @@ unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, e bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx) { + if (pipe_ctx == NULL || pipe_ctx->stream == NULL) + return false; + /* If this assert is hit then we have a link encoder dynamic management issue */ ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? 
pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); + /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */ if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) { return (pipe_ctx->stream_res.hpo_dp_stream_enc && @@ -283,6 +287,7 @@ static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, str void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt) { unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id; + enum mall_stream_type pipe_mall_type; bool unbounded_req_enabled = false; struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch; @@ -330,7 +335,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont */ populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx); - if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) { + pipe_mall_type = dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[dc_pipe_ctx_index]); + if (pipe_mall_type == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false; @@ -357,7 +363,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) && context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) { /* SS: all active surfaces stored in MALL */ - if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_mall_type != SUBVP_PHANTOM) { context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes; } else { /* SUBVP: phantom surfaces only stored in MALL */ @@ -476,7 +482,7 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc return need_recalculation; } -bool dml2_is_stereo_timing(struct dc_stream_state *stream) +bool dml2_is_stereo_timing(const struct dc_stream_state *stream) { bool is_stereo = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h index 23b9028337..5842d6d3c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h @@ -42,7 +42,7 @@ void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_st void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx); int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id); bool is_dtbclk_required(const struct dc *dc, struct dc_state *context); -bool dml2_is_stereo_timing(struct dc_stream_state *stream); +bool dml2_is_stereo_timing(const struct dc_stream_state *stream); /* * dml2_dc_construct_pipes - This function will determine if we need additional pipes based diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 269bfb14c2..72cca36706 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -423,7 +423,7 @@ static int find_drr_eligible_stream(struct dc_state *display_state) int i; for (i = 0; i < display_state->stream_count; i++) { - if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE + if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE && display_state->streams[i]->ignore_msa_timing_param) { // Use ignore_msa_timing_param flag to identify as DRR return i; @@ -639,6 +639,8 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx); memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c)); dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx); + //copy for deciding zstate use + context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod; } return result; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 548504d7de..cc662d682f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -93,15 +93,34 @@ struct dml2_dc_callbacks { struct dml2_dc_svp_callbacks { struct dc *dc; bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); - struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data); - struct dc_plane_state* (*create_plane)(struct dc *dc); - enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); - bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); - bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); - enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream); - void (*plane_state_release)(struct dc_plane_state *plane_state); - void (*stream_release)(struct dc_stream_state *stream); + struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *main_stream); + struct dc_plane_state* (*create_phantom_plane)(struct dc *dc, + struct dc_state *state, + struct dc_plane_state *main_plane); + enum dc_status (*add_phantom_stream)(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream, + struct dc_stream_state *main_stream); + bool (*add_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); + bool (*remove_phantom_plane)(const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *context); + enum dc_status (*remove_phantom_stream)(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + void (*release_phantom_plane)(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *plane); + void (*release_phantom_stream)(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc); + enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct 
pipe_ctx *pipe_ctx); + enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream); + struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream); }; struct dml2_clks_table_entry { diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index a2537229ee..b183ba5a69 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,8 +1,34 @@ # SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. + +ifdef CONFIG_DRM_AMD_DC_FP + +############################################################################### +# DCN20 +############################################################################### +DSC_DCN20 = dcn20_dsc.o + +AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn20/,$(DSC_DCN20)) + + + + +############################################################################### +# DCN35 +############################################################################### + +DSC_DCN35 = dcn35_dsc.o + +AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn35/,$(DSC_DCN35)) + + + +endif + DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC)) AMD_DISPLAY_FILES += $(AMD_DAL_DSC) + diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index e8b5f17beb..0df6c55eb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -331,8 +331,9 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, int buff_block_size; int buff_size; - if (!dsc_buff_block_size_from_dpcd(dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT], - &buff_block_size)) + if (!dsc_buff_block_size_from_dpcd( + dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] & 0x03, + &buff_block_size)) return false; buff_size = dpcd_dsc_basic_data[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT] + 1; @@ -357,10 +358,15 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, { int dpcd_throughput = dpcd_dsc_basic_data[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT]; + int dsc_throughput_granular_delta; + + dsc_throughput_granular_delta = dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] >> 3; + dsc_throughput_granular_delta *= 2; if (!dsc_throughput_from_dpcd(dpcd_throughput & DP_DSC_THROUGHPUT_MODE_0_MASK, &dsc_sink_caps->throughput_mode_0_mps)) return false; + dsc_sink_caps->throughput_mode_0_mps += dsc_throughput_granular_delta; dpcd_throughput = (dpcd_throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> DP_DSC_THROUGHPUT_MODE_1_SHIFT; if (!dsc_throughput_from_dpcd(dpcd_throughput, &dsc_sink_caps->throughput_mode_1_mps)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c index c9ae2d8f00..c9ae2d8f00 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h index ba869387c3..ba869387c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c index 71d2dff998..71d2dff998 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c +++ 
b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h index 133ad38842..133ad38842 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index 4b27f29d0d..4b27f29d0d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile index bccd46bd18..254136f8e3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile @@ -78,7 +78,7 @@ ifdef CONFIG_DRM_AMD_DC_FP # DCN ############################################################################### -HWSS_DCN10 = dcn10_hwseq.o +HWSS_DCN10 = dcn10_hwseq.o dcn10_init.o AMD_DAL_HWSS_DCN10 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn10/,$(HWSS_DCN10)) @@ -86,7 +86,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN10) ############################################################################### -HWSS_DCN20 = dcn20_hwseq.o +HWSS_DCN20 = dcn20_hwseq.o dcn20_init.o AMD_DAL_HWSS_DCN20 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn20/,$(HWSS_DCN20)) @@ -94,7 +94,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN20) ############################################################################### -HWSS_DCN201 = dcn201_hwseq.o +HWSS_DCN201 = dcn201_hwseq.o dcn201_init.o AMD_DAL_HWSS_DCN201 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn201/,$(HWSS_DCN201)) @@ -102,7 +102,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN201) ############################################################################### -HWSS_DCN21 = dcn21_hwseq.o +HWSS_DCN21 = dcn21_hwseq.o dcn21_init.o AMD_DAL_HWSS_DCN21 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn21/,$(HWSS_DCN21)) @@ -114,7 +114,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN21) ############################################################################### -HWSS_DCN30 = dcn30_hwseq.o +HWSS_DCN30 = dcn30_hwseq.o dcn30_init.o AMD_DAL_HWSS_DCN30 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn30/,$(HWSS_DCN30)) @@ -122,7 +122,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN30) ############################################################################### -HWSS_DCN301 = dcn301_hwseq.o +HWSS_DCN301 = dcn301_hwseq.o dcn301_init.o AMD_DAL_HWSS_DCN301 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn301/,$(HWSS_DCN301)) @@ -130,15 +130,17 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN301) ############################################################################### -HWSS_DCN302 = dcn302_hwseq.o +HWSS_DCN302 = dcn302_hwseq.o dcn302_init.o AMD_DAL_HWSS_DCN302 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn302/,$(HWSS_DCN302)) AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN302) + + ############################################################################### -HWSS_DCN303 = dcn303_hwseq.o +HWSS_DCN303 = dcn303_hwseq.o dcn303_init.o AMD_DAL_HWSS_DCN303 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn303/,$(HWSS_DCN303)) @@ -146,7 +148,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN303) ############################################################################### -HWSS_DCN31 = dcn31_hwseq.o +HWSS_DCN31 = dcn31_hwseq.o dcn31_init.o AMD_DAL_HWSS_DCN31 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn31/,$(HWSS_DCN31)) @@ -154,7 +156,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN31) ############################################################################### 
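Looking back at the dc_dsc.c hunk above: DP_DSC_RC_BUF_BLK_SIZE is now masked to its low two bits before decoding the RC buffer block size, and the upper bits of the same DPCD byte are reused as a peak-throughput granularity delta, two units per step in the sink caps' Mp/s field. A standalone sketch of that unpacking, with a made-up raw byte value:

#include <stdio.h>

int main(void)
{
	unsigned char raw = 0x1a;                /* hypothetical DPCD byte */
	unsigned int buf_blk_code = raw & 0x03;  /* bits [1:0]: block-size code */
	unsigned int delta_mps = (raw >> 3) * 2; /* upper bits: +2 Mp/s per step */

	printf("block-size code %u, throughput delta %u Mp/s\n",
	       buf_blk_code, delta_mps);
	return 0;
}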
-HWSS_DCN314 = dcn314_hwseq.o +HWSS_DCN314 = dcn314_hwseq.o dcn314_init.o AMD_DAL_HWSS_DCN314 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn314/,$(HWSS_DCN314)) @@ -162,7 +164,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN314) ############################################################################### -HWSS_DCN32 = dcn32_hwseq.o +HWSS_DCN32 = dcn32_hwseq.o dcn32_init.o AMD_DAL_HWSS_DCN32 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn32/,$(HWSS_DCN32)) @@ -170,7 +172,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN32) ############################################################################### -HWSS_DCN35 = dcn35_hwseq.o +HWSS_DCN35 = dcn35_hwseq.o dcn35_init.o AMD_DAL_HWSS_DCN35 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn35/,$(HWSS_DCN35)) @@ -180,4 +182,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35) ############################################################################### -endif
\ No newline at end of file +endif diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h index 44b4df6469..52f045cfd5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h @@ -682,6 +682,7 @@ struct dce_hwseq_registers { uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; uint32_t HPO_TOP_HW_CONTROL; uint32_t DMU_CLK_CNTL; + uint32_t DCCG_GATE_DISABLE_CNTL4; uint32_t DCCG_GATE_DISABLE_CNTL5; }; /* set field name */ @@ -1199,7 +1200,19 @@ struct dce_hwseq_registers { type PHYBSYMCLK_ROOT_GATE_DISABLE;\ type PHYCSYMCLK_ROOT_GATE_DISABLE;\ type PHYDSYMCLK_ROOT_GATE_DISABLE;\ - type PHYESYMCLK_ROOT_GATE_DISABLE; + type PHYESYMCLK_ROOT_GATE_DISABLE;\ + type DTBCLK_P0_GATE_DISABLE;\ + type DTBCLK_P1_GATE_DISABLE;\ + type DTBCLK_P2_GATE_DISABLE;\ + type DTBCLK_P3_GATE_DISABLE;\ + type DPSTREAMCLK0_GATE_DISABLE;\ + type DPSTREAMCLK1_GATE_DISABLE;\ + type DPSTREAMCLK2_GATE_DISABLE;\ + type DPSTREAMCLK3_GATE_DISABLE;\ + type DPIASYMCLK0_GATE_DISABLE;\ + type DPIASYMCLK1_GATE_DISABLE;\ + type DPIASYMCLK2_GATE_DISABLE;\ + type DPIASYMCLK3_GATE_DISABLE; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 3642c069bd..12af6bb9fe 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -55,6 +55,7 @@ #include "audio.h" #include "reg_helper.h" #include "panel_cntl.h" +#include "dc_state_priv.h" #include "dpcd_defs.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" @@ -1476,7 +1477,7 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } -static enum dc_status apply_single_controller_ctx_to_hw( +enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -1597,7 +1598,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( * is constructed with the same sink). Make sure not to override * and link programming on the main. */ - if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false; } @@ -1685,7 +1686,7 @@ static void disable_vga_and_power_gate_all_controllers( true); dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i; - dc->hwss.disable_plane(dc, + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); } } @@ -2135,7 +2136,7 @@ static void dce110_reset_hw_ctx_wrap( old_clk)) old_clk->funcs->cs_power_down(old_clk); - dc->hwss.disable_plane(dc, pipe_ctx_old); + dc->hwss.disable_plane(dc, dc->current_state, pipe_ctx_old); pipe_ctx_old->stream = NULL; } @@ -2302,7 +2303,7 @@ enum dc_status dce110_apply_ctx_to_hw( if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) continue; - status = apply_single_controller_ctx_to_hw( + status = dce110_apply_single_controller_ctx_to_hw( pipe_ctx, context, dc); @@ -2499,6 +2500,7 @@ static bool wait_for_reset_trigger_to_occur( /* Enable timing synchronization for a group of Timing Generators. 
*/ static void dce110_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -2592,6 +2594,7 @@ static void init_hw(struct dc *dc) struct dmcu *dmcu; struct dce_hwseq *hws = dc->hwseq; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; bp = dc->ctx->dc_bios; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2641,13 +2644,15 @@ static void init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } abm = dc->res_pool->abm; if (abm != NULL) - abm->funcs->abm_init(abm, backlight); + abm->funcs->abm_init(abm, backlight, user_level); dmcu = dc->res_pool->dmcu; if (dmcu != NULL && abm != NULL) @@ -2844,7 +2849,7 @@ static void dce110_post_unlock_program_front_end( { } -static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) +static void dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; int fe_idx = pipe_ctx->plane_res.mi ? @@ -3117,7 +3122,8 @@ void dce110_disable_link_output(struct dc_link *link, struct dmcu *dmcu = dc->res_pool->dmcu; if (signal == SIGNAL_TYPE_EDP && - link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) link->dc->hwss.edp_backlight_control(link, false); else if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 08028a1779..ed3cc3648e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_single_controller_ctx_to_hw( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); void dce110_enable_stream(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 2b3ef5cdbd..c45f84aa32 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -56,6 +56,7 @@ #include "dc_trace.h" #include "dce/dmub_outbox.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc_logger @@ -115,7 +116,7 @@ void dcn10_lock_all_pipes(struct dc *dc, !pipe_ctx->stream || (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) || !tg->funcs->is_tg_enabled(tg) || - pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) continue; if (lock) @@ -1181,7 +1182,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -1201,7 +1204,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) 
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle, // so don't wait for MPCC_IDLE in the programming sequence - if (opp != NULL && !pipe_ctx->plane_state->is_phantom) + if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; dc->optimized_required = true; @@ -1291,7 +1294,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); @@ -1417,12 +1420,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -1487,6 +1490,7 @@ void dcn10_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; bool is_optimized_init_done = false; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -1584,12 +1588,14 @@ void dcn10_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } if (abm != NULL) - abm->funcs->abm_init(abm, backlight); + abm->funcs->abm_init(abm, backlight, user_level); if (dmcu != NULL && !dmcu->auto_load_dmcu) dmcu->funcs->dmcu_init(dmcu); @@ -2266,6 +2272,7 @@ void dcn10_enable_vblanks_synchronization( void dcn10_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -2280,7 +2287,7 @@ void dcn10_enable_timing_synchronization( DC_SYNC_INFO("Setting up OTG reset trigger\n"); for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; opp = grouped_pipes[i]->stream_res.opp; @@ -2300,14 +2307,14 @@ void dcn10_enable_timing_synchronization( if (grouped_pipes[i]->stream == NULL) continue; - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream->vblank_synchronized = false; } for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger( @@ -2321,11 +2328,11 @@ void 
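One pattern repeats across the init_hw() hunks in this file and the other DCN hwseqs: abm_init() now takes a second user_level argument. hw_init() still returns the panel's current backlight, and the stored USER_LEVEL register is read alongside it; the apparent intent is to let ABM distinguish the momentary hardware level from the level the user last requested (an inference from the field name, not stated in the diff). A stub model of the flow, not the DC API:

#include <stdio.h>

#define MAX_BACKLIGHT_LEVEL 0xFFFF

struct panel_model {
	unsigned int hw_level;   /* what panel_cntl->funcs->hw_init() returns */
	unsigned int user_level; /* stored_backlight_registers.USER_LEVEL     */
};

static void abm_init(unsigned int backlight, unsigned int user_level)
{
	printf("abm: hw=0x%x user=0x%x\n", backlight, user_level);
}

int main(void)
{
	struct panel_model panel = { .hw_level = 0x2000, .user_level = 0x8000 };
	unsigned int backlight = MAX_BACKLIGHT_LEVEL;
	unsigned int user_level = MAX_BACKLIGHT_LEVEL;

	/* mirrors the per-link loop: both defaults are only overridden
	 * when a panel_cntl exists */
	backlight = panel.hw_level;
	user_level = panel.user_level;
	abm_init(backlight, user_level);
	return 0;
}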
dcn10_enable_timing_synchronization( * synchronized. Look at last pipe programmed to reset. */ - if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM) wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg); for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger( @@ -2333,7 +2340,7 @@ void dcn10_enable_timing_synchronization( } for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; opp = grouped_pipes[i]->stream_res.opp; @@ -3025,7 +3032,7 @@ void dcn10_post_unlock_program_front_end( for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) - dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) { @@ -3072,7 +3079,7 @@ void dcn10_prepare_bandwidth( context, false); - dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, + dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h index ef6d56da41..bc5dd68a24 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h @@ -75,7 +75,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn10_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context); -void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn10_lock_all_pipes( struct dc *dc, struct dc_state *context, @@ -108,13 +108,16 @@ void dcn10_power_down_on_boot(struct dc *dc); enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); -void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx); void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data); void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx); void dce110_power_down(struct dc *dc); void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context); void dcn10_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c index a5bdac79a7..a5bdac79a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h 
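The dcn10 hunks above show the mechanical half of this series: hooks such as plane_atomic_disconnect(), disable_plane() and enable_timing_synchronization() gain a struct dc_state * parameter, and direct reads of stream->mall_stream_config.type or plane_state->is_phantom become dc_state_get_pipe_subvp_type(state, pipe). A toy standalone model of why the state must be explicit (the SubVP role belongs to a state, so a pipe's role depends on which state you ask; types here are stand-ins, not the DC ones):

#include <stdio.h>

enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

#define PIPE_COUNT 4

struct dc_state_model {
	enum mall_stream_type subvp_type[PIPE_COUNT];
};

/* stand-in for dc_state_get_pipe_subvp_type(): the state is the single
 * source of truth, so old and new contexts can disagree about one pipe */
static enum mall_stream_type
get_pipe_subvp_type(const struct dc_state_model *state, int pipe)
{
	return state->subvp_type[pipe];
}

int main(void)
{
	struct dc_state_model old_state = { { SUBVP_PHANTOM } };
	struct dc_state_model new_state = { { SUBVP_NONE } };

	/* transition checks like those in dcn20_detect_pipe_changes()
	 * need both answers at once */
	printf("pipe 0 was phantom: %d, will be phantom: %d\n",
	       get_pipe_subvp_type(&old_state, 0) == SUBVP_PHANTOM,
	       get_pipe_subvp_type(&new_state, 0) == SUBVP_PHANTOM);
	return 0;
}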
b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h index 8c6fd7b844..8c6fd7b844 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index e3f547e061..868a086c72 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -55,6 +55,7 @@ #include "inc/link_enc_cfg.h" #include "link_hwss.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc_logger @@ -623,9 +624,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) } -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { - bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; + bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM; struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; DC_LOGGER_INIT(dc->ctx->logger); @@ -847,7 +848,7 @@ enum dc_status dcn20_enable_stream_timing( /* TODO enable stream if timing changed */ /* TODO unblank stream if DP */ - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable) pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg); } @@ -1368,8 +1369,14 @@ void dcn20_pipe_control_lock( } } -static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) +static void dcn20_detect_pipe_changes(struct dc_state *old_state, + struct dc_state *new_state, + struct pipe_ctx *old_pipe, + struct pipe_ctx *new_pipe) { + bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM; + bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM; + new_pipe->update_flags.raw = 0; /* If non-phantom pipe is being transitioned to a phantom pipe, @@ -1379,8 +1386,8 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx * be different). The post_unlock sequence will set the correct * update flags to enable the phantom pipe. 
*/ - if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom && - new_pipe->plane_state && new_pipe->plane_state->is_phantom) { + if (old_pipe->plane_state && !old_is_phantom && + new_pipe->plane_state && new_is_phantom) { new_pipe->update_flags.bits.disable = 1; return; } @@ -1405,6 +1412,10 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx new_pipe->update_flags.bits.scaler = 1; new_pipe->update_flags.bits.viewport = 1; new_pipe->update_flags.bits.det_size = 1; + if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE && + new_pipe->stream_res.test_pattern_params.width != 0 && + new_pipe->stream_res.test_pattern_params.height != 0) + new_pipe->update_flags.bits.test_pattern_changed = 1; if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { new_pipe->update_flags.bits.odm = 1; new_pipe->update_flags.bits.global_sync = 1; @@ -1417,14 +1428,14 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx * The remove-add sequence of the phantom pipe always results in the pipe * being blanked in enable_stream_timing (DPG). */ - if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM) new_pipe->update_flags.bits.enable = 1; /* Phantom pipes are effectively disabled, if the pipe was previously phantom * we have to enable */ - if (old_pipe->plane_state && old_pipe->plane_state->is_phantom && - new_pipe->plane_state && !new_pipe->plane_state->is_phantom) + if (old_pipe->plane_state && old_is_phantom && + new_pipe->plane_state && !new_is_phantom) new_pipe->update_flags.bits.enable = 1; if (old_pipe->plane_state && !new_pipe->plane_state) { @@ -1557,6 +1568,7 @@ static void dcn20_update_dchubp_dpp( struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dccg *dccg = dc->res_pool->dccg; bool viewport_changed = false; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx); if (pipe_ctx->update_flags.bits.dppclk) dpp->funcs->dpp_dppclk_control(dpp, false, true); @@ -1702,7 +1714,7 @@ static void dcn20_update_dchubp_dpp( pipe_ctx->update_flags.bits.plane_changed || plane_state->update_flags.bits.addr_update) { if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + pipe_mall_type == SUBVP_MAIN) { union block_sequence_params params; params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; @@ -1716,7 +1728,7 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.enable) hubp->funcs->set_blank(hubp, false); /* If the stream paired with this plane is phantom, the plane is also phantom */ - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM + if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable) hubp->funcs->phantom_hubp_post_enable(hubp); } @@ -1774,7 +1786,7 @@ static void dcn20_program_pipe( pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); - if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); pipe_ctx->stream_res.tg->funcs->set_vtg_params( @@ -1914,7 +1926,7 @@ void dcn20_program_front_end_for_ctx( /* Set pipe update flags and lock pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) - 
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], + dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i], &context->res_ctx.pipe_ctx[i]); /* When disabling phantom pipes, turn on phantom OTG first (so we can get double @@ -1924,15 +1936,16 @@ void dcn20_program_front_end_for_ctx( struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && - dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; if (tg->funcs->enable_crtc) { if (dc->hwss.blank_phantom) { int main_pipe_width, main_pipe_height; + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream); - main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width; - main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height; + main_pipe_width = phantom_stream->dst.width; + main_pipe_height = phantom_stream->dst.height; dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); } tg->funcs->enable_crtc(tg); @@ -1961,9 +1974,9 @@ void dcn20_program_front_end_for_ctx( * DET allocation. */ if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || - (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom))) + (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); - hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } @@ -1996,7 +2009,7 @@ void dcn20_program_front_end_for_ctx( * but the MPO still exists until the double buffered update of the main pipe so we * will get a frame of underflow if the phantom pipe is programmed here. 
*/ - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) dcn20_program_pipe(dc, pipe, context); } @@ -2035,7 +2048,7 @@ void dcn20_post_unlock_program_front_end( for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) - dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); /* * If we are enabling a pipe, we need to wait for pending clear as this is a critical @@ -2047,7 +2060,7 @@ void dcn20_post_unlock_program_front_end( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // Don't check flip pending on phantom pipes if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable && - pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) { struct hubp *hubp = pipe->plane_res.hubp; int j = 0; for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us @@ -2070,7 +2083,7 @@ void dcn20_post_unlock_program_front_end( * programming sequence). */ while (pipe) { - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { /* When turning on the phantom pipe we want to run through the * entire enable sequence, so apply all the "enable" flags. */ @@ -2140,17 +2153,17 @@ void dcn20_prepare_bandwidth( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // At optimize don't restore the original watermark value - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; break; } } /* program dchubbub watermarks: - * For assigning wm_optimized_required, use |= operator since we don't want + * For assigning optimized_required, use |= operator since we don't want * to clear the value if the optimize has not happened yet */ - dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub, + dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); @@ -2163,10 +2176,10 @@ void dcn20_prepare_bandwidth( if (hubbub->funcs->program_compbuf_size) { if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) { compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes; - dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); + dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); } else { compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb; - dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); + dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); } hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false); @@ -2184,7 +2197,7 @@ void dcn20_optimize_bandwidth( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // At optimize don't need to restore the original watermark value - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { 
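In dcn20_prepare_bandwidth() above, dc->wm_optimized_required becomes dc->optimized_required and every assignment uses |=. The in-diff comment gives the reason: prepare can run again before the deferred optimize has happened, so the flag may only be raised, never cleared, by an intermediate pass. Distilled into a runnable sketch:

#include <stdbool.h>
#include <stdio.h>

static bool optimized_required; /* models dc->optimized_required */

static void prepare_pass(bool changed_something)
{
	/* '|=' preserves pending work from earlier passes; a plain '='
	 * would let a later no-op pass silently discard it */
	optimized_required |= changed_something;
}

int main(void)
{
	prepare_pass(true);   /* watermarks reprogrammed         */
	prepare_pass(false);  /* nothing changed, flag must stay */
	printf("optimize still required: %d\n", optimized_required); /* 1 */
	return 0;
}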
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; break; } @@ -2218,7 +2231,8 @@ void dcn20_optimize_bandwidth( dc->clk_mgr, context, true); - if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) { + if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && + !dc->debug.disable_extblankadj) { for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -2331,7 +2345,7 @@ bool dcn20_wait_for_blank_complete( int counter; for (counter = 0; counter < 1000; counter++) { - if (opp->funcs->dpg_is_blanked(opp)) + if (!opp->funcs->dpg_is_pending(opp)) break; udelay(100); @@ -2342,7 +2356,7 @@ bool dcn20_wait_for_blank_complete( return false; } - return true; + return opp->funcs->dpg_is_blanked(opp); } bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) @@ -2548,7 +2562,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn20_reset_back_end_for_pipe( +void dcn20_reset_back_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) @@ -2944,7 +2958,7 @@ void dcn20_fpga_init_hw(struct dc *dc) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; /*to do*/ - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -2961,7 +2975,7 @@ void dcn20_fpga_init_hw(struct dc *dc) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h index ab02e4e9c8..d950b3e54e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h @@ -52,7 +52,7 @@ void dcn20_program_output_csc(struct dc *dc, void dcn20_enable_stream(struct pipe_ctx *pipe_ctx); void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn20_disable_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing( void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c index 884e3e3233..884e3e3233 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h index 12277797cd..12277797cd 100644 --- 
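dcn20_wait_for_blank_complete() above also changes shape: instead of polling until the DPG reports blanked, it polls until the double-buffered DPG update is no longer pending and only then reports the actual blanked status. A stubbed-out sketch of the new control flow (the stubs are assumptions that simulate an update clearing after three polls; they are not the DC callbacks):

#include <stdbool.h>
#include <stdio.h>

static int pending_polls = 3;

static bool dpg_is_pending(void) { return pending_polls-- > 0; }
static bool dpg_is_blanked(void) { return true; }
static void udelay(int us)       { (void)us; }

static bool wait_for_blank_complete(void)
{
	for (int i = 0; i < 1000; i++) {
		if (!dpg_is_pending())
			/* pending cleared: the blanked status is now real */
			return dpg_is_blanked();
		udelay(100);
	}
	return false; /* timed out with an update still in flight */
}

int main(void)
{
	printf("blank complete: %d\n", wait_for_blank_complete());
	return 0;
}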
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c index d3fe6092f5..d5769f3887 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c @@ -320,7 +320,7 @@ void dcn201_init_hw(struct dc *dc) res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = res_pool->opps[i]; /*To do: number of MPCC != number of opp*/ - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -337,7 +337,7 @@ void dcn201_init_hw(struct dc *dc) for (i = 0; i < res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -369,7 +369,9 @@ void dcn201_init_hw(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn201_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h index 26cd62be64..6a50a9894b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h @@ -33,7 +33,7 @@ void dcn201_init_hw(struct dc *dc); void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); -void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn201_plane_atomic_disconnect(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx); void dcn201_pipe_control_lock( diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c index a13bf6c938..a13bf6c938 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h index 1168887b03..1168887b03 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 5c7f380a84..7252f5f781 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -211,7 +211,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; uint32_t otg_inst; - if (!abm && !tg && !panel_cntl) + if (!abm || !tg || !panel_cntl) return; otg_inst = tg->inst; @@ -245,7 +245,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, struct 
panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; uint32_t otg_inst; - if (!abm && !tg && !panel_cntl) + if (!abm || !tg || !panel_cntl) return false; otg_inst = tg->inst; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c index 18249c6b6d..18249c6b6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h index 3ed2429264..3ed2429264 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index c89149d153..55cf4c9e6a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -51,7 +51,7 @@ #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_resource.h" #include "link.h" - +#include "dc_state_priv.h" @@ -367,6 +367,10 @@ void dcn30_enable_writeback( DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\ __func__, wb_info->dwb_pipe_inst,\ wb_info->mpcc_inst); + + /* Warmup interface */ + dcn30_mmhubbub_warmup(dc, 1, wb_info); + /* Update writeback pipe */ dcn30_set_writeback(dc, wb_info, context); @@ -472,6 +476,7 @@ void dcn30_init_hw(struct dc *dc) int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -608,13 +613,15 @@ void dcn30_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ @@ -972,7 +979,7 @@ void dcn30_hardware_release(struct dc *dc) if (!pipe->stream) continue; - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) { subvp_in_use = true; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 9894caedff..9894caedff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h index c280ff90bf..c280ff90bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c index 6477009ce0..6477009ce0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h index 0bca48ccbf..0bca48ccbf 100644 --- 
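The two dcn21 hunks above are a classic De Morgan fix: `!abm && !tg && !panel_cntl` is only true when all three pointers are NULL, yet the functions go on to dereference each of them, so a single missing object sailed past the guard. `!abm || !tg || !panel_cntl` bails out when any one is missing. A standalone illustration:

#include <stdio.h>

struct tg { int inst; };

static int otg_inst_or_fail(void *abm, struct tg *tg, void *panel_cntl)
{
	/* every pointer is used below, so reject on '||', not '&&' */
	if (!abm || !tg || !panel_cntl)
		return -1;
	return tg->inst; /* safe: tg was checked above */
}

int main(void)
{
	int dummy;

	/* under the old '&&' guard a NULL tg still reached tg->inst */
	printf("%d\n", otg_inst_or_fail(&dummy, NULL, &dummy)); /* -1 now */
	return 0;
}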
a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c index 637f9514d3..637f9514d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h index 899587b93a..899587b93a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c index edb4d68b81..edb4d68b81 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h index 4949981126..4949981126 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 3a70a3cbc2..7423880fab 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -96,7 +96,8 @@ static void enable_memory_low_power(struct dc *dc) if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { // Power down VPGs for (i = 0; i < dc->res_pool->stream_enc_count; i++) - dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); + if (dc->res_pool->stream_enc[i]->vpg) + dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); #if defined(CONFIG_DRM_AMD_DC_FP) for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); @@ -112,6 +113,7 @@ void dcn31_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -223,13 +225,15 @@ void dcn31_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 669f524bd0..669f524bd0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h index a3db08c8bd..a3db08c8bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h +++ 
b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index ccb7e317e8..ccb7e317e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h index 8f92e66577..8f92e66577 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 580afb0088..7668229438 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -51,6 +51,7 @@ #include "dcn32/dcn32_resource.h" #include "link.h" #include "../dcn20/dcn20_hwseq.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) @@ -348,8 +349,7 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { // There is at least 1 SubVP pipe, so enable SubVP enable_subvp = true; break; @@ -375,18 +375,20 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, bool subvp_immediate_flip = false; bool subvp_in_use = false; struct pipe_ctx *pipe; + enum mall_stream_type pipe_mall_type = SUBVP_NONE; for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) { subvp_in_use = true; break; } } if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) { - if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN && + if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN && top_pipe_to_program->plane_state->flip_immediate) subvp_immediate_flip = true; } @@ -398,7 +400,7 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, if (!lock) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN && + if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN && should_lock_all_pipes) pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); } @@ -416,14 +418,7 @@ void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) { struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc; bool lock = params->subvp_pipe_control_lock_fast_params.lock; - struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx; - bool subvp_immediate_flip = false; - - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { - if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && - pipe_ctx->plane_state->flip_immediate) - subvp_immediate_flip = true; - } + bool subvp_immediate_flip = 
params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip; // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. if (subvp_immediate_flip) { @@ -609,7 +604,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || + if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); @@ -624,7 +619,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN || + if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); @@ -671,8 +666,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) if (cursor_size > 16384) cache_cursor = true; - if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - hubp->funcs->hubp_update_mall_sel(hubp, 1, false); + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + hubp->funcs->hubp_update_mall_sel(hubp, 1, false); } else { // MALL not supported with Stereo3D hubp->funcs->hubp_update_mall_sel(hubp, @@ -714,9 +709,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) * see if CURSOR_REQ_MODE will be back to 1 for SubVP * when it should be 0 for MPO */ - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) hubp->funcs->hubp_prepare_subvp_buffering(hubp, true); - } } } } @@ -759,6 +753,7 @@ void dcn32_init_hw(struct dc *dc) int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -913,13 +908,15 @@ void dcn32_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ @@ -1194,7 +1191,7 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_ continue; if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) - && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) { pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); otg_disabled[i] = true; @@ -1345,8 +1342,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && 
pipe->stream->mall_stream_config.type == SUBVP_MAIN && - pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN && + dc_state_get_paired_subvp_stream(context, pipe->stream) == phantom_pipe->stream) { if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) { phantom_plane->src_rect.x = pipe->plane_state->src_rect.x; @@ -1371,21 +1368,19 @@ void dcn32_update_phantom_vp_position(struct dc *dc, void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) { phantom_pipe->update_flags.raw = 0; - if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { - phantom_pipe->update_flags.bits.enable = 1; - phantom_pipe->update_flags.bits.mpcc = 1; - phantom_pipe->update_flags.bits.dppclk = 1; - phantom_pipe->update_flags.bits.hubp_interdependent = 1; - phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; - phantom_pipe->update_flags.bits.gamut_remap = 1; - phantom_pipe->update_flags.bits.scaler = 1; - phantom_pipe->update_flags.bits.viewport = 1; - phantom_pipe->update_flags.bits.det_size = 1; - if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { - phantom_pipe->update_flags.bits.odm = 1; - phantom_pipe->update_flags.bits.global_sync = 1; - } + if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { + phantom_pipe->update_flags.bits.enable = 1; + phantom_pipe->update_flags.bits.mpcc = 1; + phantom_pipe->update_flags.bits.dppclk = 1; + phantom_pipe->update_flags.bits.hubp_interdependent = 1; + phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + phantom_pipe->update_flags.bits.gamut_remap = 1; + phantom_pipe->update_flags.bits.scaler = 1; + phantom_pipe->update_flags.bits.viewport = 1; + phantom_pipe->update_flags.bits.det_size = 1; + if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { + phantom_pipe->update_flags.bits.odm = 1; + phantom_pipe->update_flags.bits.global_sync = 1; } } } @@ -1445,9 +1440,44 @@ void dcn32_update_dsc_pg(struct dc *dc, } } +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) || + (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} + void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) { unsigned int i; + enum dc_status status = DC_OK; + struct dce_hwseq *hws = dc->hwseq; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1457,8 +1487,8 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) * pipe, wait for the double buffer update 
to complete first before we do * ANY phantom pipe programming. */ - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && - old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM && + old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) { old_pipe->stream_res.tg->funcs->wait_for_state( old_pipe->stream_res.tg, CRTC_STATE_VBLANK); @@ -1468,16 +1498,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) } } for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - - if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - // If old context or new context has phantom pipes, apply - // the phantom timings now. We can't change the phantom - // pipe configuration safely without driver acquiring - // the DMCUB lock first. - dc->hwss.apply_ctx_to_hw(dc, context); - break; + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == NULL) + continue; + + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx->stream == pipe_ctx_old->stream && + pipe_ctx->stream->link->link_state_valid) { + continue; } + + if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) + continue; + + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) + continue; + + if (hws->funcs.apply_single_controller_ctx_to_hw) + status = hws->funcs.apply_single_controller_ctx_to_hw( + pipe_ctx, + context, + dc); + + ASSERT(status == DC_OK); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); +#endif } } @@ -1691,3 +1744,26 @@ void dcn32_prepare_bandwidth(struct dc *dc, context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; } } + +void dcn32_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock) +{ + unsigned int i; + struct pipe_ctx *pipe; + struct timing_generator *tg; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) + continue; + + if (lock) + dc->hwss.pipe_control_lock(dc, pipe, true); + else + dc->hwss.pipe_control_lock(dc, pipe, false); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cecf7f0f56..f55c11fc56 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc, void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context); + void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); @@ -127,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, void dcn32_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn32_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock); #endif /* __DC_HWSS_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c 
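The new dcn32_interdependent_update_lock() above replaces dcn10_lock_all_pipes for DCN32 (see the dcn32_init.c hunk that follows): only pipes that are OTG masters with a running timing generator take the lock, and SubVP phantom pipes are skipped, since their OTGs are driven by the phantom enable/disable sequences added in this file. A toy model of the filter with stand-in types:

#include <stdbool.h>
#include <stdio.h>

enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct pipe_model {
	bool otg_master;
	bool tg_enabled;
	enum mall_stream_type subvp;
};

static void interdependent_update_lock(struct pipe_model *pipes, int n,
				       bool lock)
{
	for (int i = 0; i < n; i++) {
		if (!pipes[i].otg_master ||
		    !pipes[i].tg_enabled ||
		    pipes[i].subvp == SUBVP_PHANTOM)
			continue; /* same three skips as the real loop */
		printf("pipe %d: %s\n", i, lock ? "lock" : "unlock");
	}
}

int main(void)
{
	struct pipe_model pipes[] = {
		{ true,  true,  SUBVP_MAIN    }, /* locked           */
		{ true,  true,  SUBVP_PHANTOM }, /* skipped: phantom */
		{ true,  false, SUBVP_NONE    }, /* skipped: TG off  */
		{ false, true,  SUBVP_NONE    }, /* skipped: not OTG */
	};

	interdependent_update_lock(pipes, 4, true);
	return 0;
}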
b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 427cfc8c24..03253faeae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .disable_plane = dcn20_disable_plane, .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, + .interdependent_update_lock = dcn32_interdependent_update_lock, .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn32_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, @@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = dcn32_enable_phantom_streams, + .disable_phantom_streams = dcn32_disable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, @@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = { .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, + .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, }; void dcn32_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h index 89a591eb2c..89a591eb2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 325a711a14..1e67374b89 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -56,6 +56,7 @@ #include "dcn30/dcn30_cm_common.h" #include "dcn31/dcn31_hwseq.h" #include "dcn20/dcn20_hwseq.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) \ struct dal_logger *dc_logger = logger @@ -133,6 +134,7 @@ void dcn35_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -145,17 +147,36 @@ void dcn35_init_hw(struct dc *dc) hws->funcs.bios_golden_init(dc); } - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */ - REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1, - PHYBSYMCLK_ROOT_GATE_DISABLE, 1, - PHYCSYMCLK_ROOT_GATE_DISABLE, 1, - PHYDSYMCLK_ROOT_GATE_DISABLE, 1, - PHYESYMCLK_ROOT_GATE_DISABLE, 1); + if (!dc->debug.disable_clock_gate) { + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + + /* Disable gating for PHYASYMCLK. 
This will be enabled in dccg if needed */ + REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1, + PHYBSYMCLK_ROOT_GATE_DISABLE, 1, + PHYCSYMCLK_ROOT_GATE_DISABLE, 1, + PHYDSYMCLK_ROOT_GATE_DISABLE, 1, + PHYESYMCLK_ROOT_GATE_DISABLE, 1); + + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4, + DPIASYMCLK0_GATE_DISABLE, 0, + DPIASYMCLK1_GATE_DISABLE, 0, + DPIASYMCLK2_GATE_DISABLE, 0, + DPIASYMCLK3_GATE_DISABLE, 0); + + REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF); + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5, + DTBCLK_P0_GATE_DISABLE, 0, + DTBCLK_P1_GATE_DISABLE, 0, + DTBCLK_P2_GATE_DISABLE, 0, + DTBCLK_P3_GATE_DISABLE, 0); + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5, + DPSTREAMCLK0_GATE_DISABLE, 0, + DPSTREAMCLK1_GATE_DISABLE, 0, + DPSTREAMCLK2_GATE_DISABLE, 0, + DPSTREAMCLK3_GATE_DISABLE, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf); + } // Initialize the dccg if (res_pool->dccg->funcs->dccg_init) @@ -260,13 +281,15 @@ void dcn35_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } if (dc->ctx->dmub_srv) { for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } } @@ -332,9 +355,6 @@ void dcn35_init_hw(struct dc *dc) if (dc->res_pool->pg_cntl) { if (dc->res_pool->pg_cntl->funcs->init_pg_status) dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl); - - if (dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22) - dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22(dc->res_pool->pg_cntl, false); } } @@ -619,7 +639,7 @@ void dcn35_power_down_on_boot(struct dc *dc) bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) { struct dc_link *edp_links[MAX_NUM_EDP]; - int edp_num; + int i, edp_num; if (dc->debug.dmcub_emulation) return true; @@ -627,6 +647,13 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num == 0 || edp_num > 1) return false; + + for (i = 0; i < dc->current_state->stream_count; ++i) { + struct dc_stream_state *stream = dc->current_state->streams[i]; + + if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal)) + return false; + } } // TODO: review other cases when idle optimization is allowed @@ -756,12 +783,12 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -888,10 +915,10 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; - bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; + bool is_phantom = dc_state_get_pipe_subvp_type(state, 
pipe_ctx) == SUBVP_PHANTOM; struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; DC_LOGGER_INIT(dc->ctx->logger); @@ -918,6 +945,8 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, bool hpo_frl_stream_enc_acquired = false; bool hpo_dp_stream_enc_acquired = false; int i = 0, j = 0; + int edp_num = 0; + struct dc_link *edp_links[MAX_NUM_EDP] = { NULL }; memset(update_state, 0, sizeof(struct pg_block_update)); @@ -958,10 +987,24 @@ if (pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false; + } + /* Domain 24 controls all OTG, MPC and OPP instances; as long as one OTG is still up, avoid enabling OTG PG */ + for (i = 0; i < dc->res_pool->timing_generator_count; i++) { + struct timing_generator *tg = dc->res_pool->timing_generators[i]; + if (tg && tg->funcs->is_tg_enabled(tg)) { + update_state->pg_pipe_res_update[PG_OPTC][i] = false; + break; + } + } - if (pipe_ctx->stream_res.tg) - update_state->pg_pipe_res_update[PG_OPTC][pipe_ctx->stream_res.tg->inst] = false; + dc_get_edp_links(dc, edp_links, &edp_num); + if (edp_num == 0 || + ((!edp_links[0] || !edp_links[0]->edp_sink_present) && + (!edp_links[1] || !edp_links[1]->edp_sink_present))) { + /* No eDP exists on this config; keep Domain 24 powered on. For S0i3 this is handled in DMUB FW */ + update_state->pg_pipe_res_update[PG_OPTC][0] = false; } + } void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, @@ -1047,8 +1090,29 @@ } -void dcn35_block_power_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on) +/** + * dcn35_hw_block_power_down() - power down sequence + * + * The following sequence describes the ON-OFF (ONO) for power down: + * + * ONO Region 3, DCPG 25: hpo - SKIPPED + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power down at IPS2 entry + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. Will be powered down after the LONO timer is armed + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn35_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state) { int i = 0; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; @@ -1057,64 +1121,106 @@ return; if (dc->debug.ignore_pg) return; + if (update_state->pg_res_update[PG_HPO]) { if (pg_cntl->funcs->hpo_pg_control) - pg_cntl->funcs->hpo_pg_control(pg_cntl, power_on); + pg_cntl->funcs->hpo_pg_control(pg_cntl, false); } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { if (pg_cntl->funcs->hubp_dpp_pg_control) - pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, power_on); + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false); } - + } + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) if (update_state->pg_pipe_res_update[PG_DSC][i]) { if (pg_cntl->funcs->dsc_pg_control) - pg_cntl->funcs->dsc_pg_control(pg_cntl, i, power_on); + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false); } - if (update_state->pg_pipe_res_update[PG_MPCC][i]) { - if (pg_cntl->funcs->mpcc_pg_control) - pg_cntl->funcs->mpcc_pg_control(pg_cntl, i, power_on); - } - - if (update_state->pg_pipe_res_update[PG_OPP][i]) { - if (pg_cntl->funcs->opp_pg_control) - pg_cntl->funcs->opp_pg_control(pg_cntl, i, power_on); - } - if (update_state->pg_pipe_res_update[PG_OPTC][i]) { - if (pg_cntl->funcs->optc_pg_control) - pg_cntl->funcs->optc_pg_control(pg_cntl, i, power_on); - } - } + /* This will need all clients to unregister OPTC interrupts; let DMUB FW handle this */ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false); - if (update_state->pg_res_update[PG_DWB]) { - if (pg_cntl->funcs->dwb_pg_control) - pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on); - } + // Domains 22, 23 and 25 are currently always on. - if (pg_cntl->funcs->plane_otg_pg_control) - pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on); } -void dcn35_root_clock_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on) +/** + * dcn35_hw_block_power_up() - power up sequence + * + * The following sequence describes the ON-OFF (ONO) for power up: + * + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 3, DCPG 25: hpo - SKIPPED + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn35_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state) { int i = 0; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; if (!pg_cntl) return; + if (dc->debug.ignore_pg) + return; + // Domains 22, 23 and 25 are currently always on.
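The two ONO lists above are exact mirrors: power-down walks from the per-pipe resources (HUBP/DPP, then the DSCs) out to the shared MPC/OPP/OPTC/DWB domain, and power-up replays that walk in reverse, leaving the SKIPPED regions to PMFW/DMUB FW. A minimal self-contained C sketch of the mirrored ordering follows; every name in it is an illustrative stand-in, not driver API.

#include <stdbool.h>
#include <stdio.h>

#define NUM_PIPES 4
#define NUM_DSC   4

/* Which blocks the caller wants gated/ungated (stand-in for pg_block_update). */
struct pg_update {
	bool hpo;
	bool hubp_dpp[NUM_PIPES];
	bool dsc[NUM_DSC];
};

static void set_power(const char *block, int inst, bool on)
{
	printf("%s%d -> %s\n", block, inst, on ? "on" : "off");
}

/* Down: per-pipe HUBP/DPP and DSC instances before the shared plane/OTG domain. */
static void hw_block_power_down(const struct pg_update *u)
{
	int i;

	if (u->hpo)
		set_power("hpo", 0, false);
	for (i = 0; i < NUM_PIPES; i++)
		if (u->hubp_dpp[i])
			set_power("hubp_dpp", i, false);
	for (i = 0; i < NUM_DSC; i++)
		if (u->dsc[i])
			set_power("dsc", i, false);
	set_power("plane_otg", 0, false);
}

/* Up: the same walk, replayed in reverse. */
static void hw_block_power_up(const struct pg_update *u)
{
	int i;

	set_power("plane_otg", 0, true);
	for (i = 0; i < NUM_DSC; i++)
		if (u->dsc[i])
			set_power("dsc", i, true);
	for (i = 0; i < NUM_PIPES; i++)
		if (u->hubp_dpp[i])
			set_power("hubp_dpp", i, true);
	if (u->hpo)
		set_power("hpo", 0, true);
}

int main(void)
{
	struct pg_update u = { .hpo = true, .hubp_dpp = { true, true }, .dsc = { true } };

	hw_block_power_down(&u);
	hw_block_power_up(&u);
	return 0;
}

The regions marked SKIPPED in the kernel-doc stay under PMFW/DMUB FW control, which is why neither helper touches them.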
+ /* This will need all clients to unregister OPTC interrupts; let DMUB FW handle this */ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true); + + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true); + } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { - if (dc->hwseq->funcs.dpp_root_clock_control) - dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true); } + } + if (update_state->pg_res_update[PG_HPO]) { + if (pg_cntl->funcs->hpo_pg_control) + pg_cntl->funcs->hpo_pg_control(pg_cntl, true); + } +} +void dcn35_root_clock_control(struct dc *dc, + struct pg_block_update *update_state, bool power_on) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + if (!pg_cntl) + return; + /* enable root clocks first when powering up */ + if (power_on) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (dc->hwseq->funcs.dpp_root_clock_control) + dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + } + } + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { if (update_state->pg_pipe_res_update[PG_DSC][i]) { if (power_on) { if (dc->res_pool->dccg->funcs->enable_dsc) @@ -1125,6 +1231,15 @@ void dcn35_root_clock_control(struct dc *dc, } } } + /* disable root clocks first when powering down */ + if (!power_on) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (dc->hwseq->funcs.dpp_root_clock_control) + dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + } + } } void dcn35_prepare_bandwidth( @@ -1138,9 +1253,9 @@ if (dc->hwss.root_clock_control) dc->hwss.root_clock_control(dc, &pg_update_state, true); - - if (dc->hwss.block_power_control) - dc->hwss.block_power_control(dc, &pg_update_state, true); + /* power up required HW blocks */ + if (dc->hwss.hw_block_power_up) + dc->hwss.hw_block_power_up(dc, &pg_update_state); } dcn20_prepare_bandwidth(dc, context); @@ -1156,9 +1271,9 @@ void dcn35_optimize_bandwidth( if (dc->hwss.calc_blocks_to_gate) { dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state); - - if (dc->hwss.block_power_control) - dc->hwss.block_power_control(dc, &pg_update_state, false); + /* try to power down unused blocks */ + if (dc->hwss.hw_block_power_down) + dc->hwss.hw_block_power_down(dc, &pg_update_state); if (dc->hwss.root_clock_control) dc->hwss.root_clock_control(dc, &pg_update_state, false); @@ -1180,3 +1295,44 @@ uint32_t dcn35_get_idle_state(const struct dc *dc) return 0; } + +void dcn35_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, struct dc_crtc_timing_adjust adjust) + +{ + int i = 0; + struct drr_params params = {0}; + // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow + unsigned int event_triggers = 0x800; + // Note: DRR trigger events are generated regardless of whether num_frames is met.
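The v_total_min/v_total_max pair that dcn35_set_drr() copies into struct drr_params is simply a refresh-rate window expressed in lines: for a fixed pixel clock and h_total, refresh = pix_clk / (h_total * v_total), so the highest allowed refresh fixes v_total_min and the lowest fixes v_total_max. A standalone sketch of that arithmetic; the 1080p numbers are assumed standard CTA timing values, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* refresh = pix_clk / (h_total * v_total)  =>  v_total = pix_clk / (h_total * refresh) */
static uint32_t vtotal_for_refresh(uint64_t pix_clk_hz, uint32_t h_total,
				   uint32_t refresh_hz)
{
	return (uint32_t)(pix_clk_hz / ((uint64_t)h_total * refresh_hz));
}

int main(void)
{
	uint64_t pix_clk_hz = 148500000ULL;	/* 148.5 MHz, standard 1080p timing */
	uint32_t h_total = 2200;

	/* A 48-60 Hz VRR window: the fastest rate bounds v_total from below,
	 * the slowest from above. */
	printf("v_total_min = %u\n",
	       (unsigned)vtotal_for_refresh(pix_clk_hz, h_total, 60)); /* 1125 */
	printf("v_total_max = %u\n",
	       (unsigned)vtotal_for_refresh(pix_clk_hz, h_total, 48)); /* 1406 */
	return 0;
}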
+ unsigned int num_frames = 2; + + params.vertical_total_max = adjust.v_total_max; + params.vertical_total_min = adjust.v_total_min; + params.vertical_total_mid = adjust.v_total_mid; + params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num; + + for (i = 0; i < num_pipes; i++) { + if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) { + struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing; + struct dc *dc = pipe_ctx[i]->stream->ctx->dc; + + if (dc->debug.static_screen_wait_frames) { + unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total); + + if (frame_rate >= 120 && dc->caps.ips_support && + dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { + /* IPS enabled case */ + num_frames = 2 * (frame_rate % 60); + } + } + if (pipe_ctx[i]->stream_res.tg->funcs->set_drr) + pipe_ctx[i]->stream_res.tg->funcs->set_drr( + pipe_ctx[i]->stream_res.tg, &params); + if (adjust.v_total_max != 0 && adjust.v_total_min != 0) + if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control) + pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( + pipe_ctx[i]->stream_res.tg, + event_triggers, num_frames); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 0dff10d179..fd66316e33 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -57,14 +57,16 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context); void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); -void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); -void dcn35_block_power_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on); +void dcn35_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state); +void dcn35_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state); void dcn35_root_clock_control(struct dc *dc, struct pg_block_update *update_state, bool power_on); @@ -84,4 +86,8 @@ void dcn35_dsc_pg_control( void dcn35_set_idle_state(const struct dc *dc, bool allow_idle); uint32_t dcn35_get_idle_state(const struct dc *dc); + +void dcn35_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, struct dc_crtc_timing_adjust adjust); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 296bf3a38c..a630aa77dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -68,7 +68,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .prepare_bandwidth = dcn35_prepare_bandwidth, .optimize_bandwidth = dcn35_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, + .set_drr = dcn35_set_drr, .get_position = dcn10_get_position, .set_static_screen_control = dcn30_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, @@ -118,7 +118,8 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.update_dsc_pg = dcn32_update_dsc_pg, .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, - .block_power_control = dcn35_block_power_control, + .hw_block_power_up = dcn35_hw_block_power_up, + .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, .set_idle_state = dcn35_set_idle_state, .get_idle_state = dcn35_get_idle_state diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h index b67015032c..b67015032c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt new file mode 100644 index 0000000000..951ca2da44 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn351_init.c + dcn351_init.h +) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile new file mode 100644 index 0000000000..b24ad27fe6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile @@ -0,0 +1,17 @@ +# +# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved +# +# All rights reserved. This notice is intended as a precaution against +# inadvertent publication and does not imply publication or any waiver +# of confidentiality. The year included in the foregoing notice is the +# year of creation of the work. +# +# Authors: AMD +# +# Makefile for DCN351. + +DCN351 = dcn351_init.o + +AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DCN351) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c new file mode 100644 index 0000000000..143d3fc022 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -0,0 +1,171 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dcn301/dcn301_hwseq.h" +#include "dcn31/dcn31_hwseq.h" +#include "dcn32/dcn32_hwseq.h" +#include "dcn35/dcn35_hwseq.h" + +#include "dcn351_init.h" + +static const struct hw_sequencer_funcs dcn351_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn35_init_hw, + .power_down_on_boot = dcn35_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn31_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn32_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn35_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn35_prepare_bandwidth, + .optimize_bandwidth = dcn35_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn30_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_T12 = dce110_edp_wait_for_T12, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn31_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down 
= dce110_power_down, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dcn32_disable_link_output, + .z10_restore = dcn35_z10_restore, + .z10_save_init = dcn31_z10_save_init, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, + .update_dsc_pg = dcn32_update_dsc_pg, + .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, + .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, + .hw_block_power_up = dcn35_hw_block_power_up, + .hw_block_power_down = dcn35_hw_block_power_down, + .root_clock_control = dcn35_root_clock_control, + .set_idle_state = dcn35_set_idle_state, + .get_idle_state = dcn35_get_idle_state +}; + +static const struct hwseq_private_funcs dcn351_private_funcs = { + .init_pipes = dcn35_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn32_set_input_transfer_func, + .set_output_transfer_func = dcn32_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = NULL, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn35_plane_atomic_disable, + //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/ + //.hubp_pg_control = dcn35_hubp_pg_control, + .enable_power_gating_plane = dcn35_enable_power_gating_plane, + .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = dcn35_update_odm, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_mcm_luts = dcn32_set_mcm_luts, + .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, + .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, + .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, + .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .dsc_pg_control = dcn35_dsc_pg_control, + .dsc_pg_status = dcn32_dsc_pg_status, + .enable_plane = dcn35_enable_plane, +}; + +void dcn351_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn351_funcs; + dc->hwseq->funcs = dcn351_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h new file mode 100644 index 0000000000..970b01008b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Advanced Micro 
Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN351_INIT_H__ +#define __DC_DCN351_INIT_H__ + +struct dc; + +void dcn351_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN351_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 452680fe9a..64ca7c6650 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -50,7 +50,7 @@ struct pg_block_update; struct subvp_pipe_control_lock_fast_params { struct dc *dc; bool lock; - struct pipe_ctx *pipe_ctx; + bool subvp_immediate_flip; }; struct pipe_control_lock_params { @@ -200,7 +200,7 @@ struct hw_sequencer_funcs { struct dc_state *context); enum dc_status (*apply_ctx_to_hw)(struct dc *dc, struct dc_state *context); - void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, @@ -248,6 +248,7 @@ struct hw_sequencer_funcs { void (*enable_per_frame_crtc_position_reset)(struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_timing_synchronization)(struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_vblanks_synchronization)(struct dc *dc, @@ -378,6 +379,7 @@ struct hw_sequencer_funcs { struct dc_cursor_attributes *cursor_attr); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); + void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context); void (*subvp_pipe_control_lock)(struct dc *dc, struct dc_state *context, bool lock, @@ -414,8 +416,10 @@ struct hw_sequencer_funcs { struct pg_block_update *update_state); void (*calc_blocks_to_ungate)(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); - void (*block_power_control)(struct dc *dc, - struct pg_block_update *update_state, bool power_on); + void (*hw_block_power_up)(struct dc *dc, + struct pg_block_update *update_state); + void (*hw_block_power_down)(struct dc *dc, + struct pg_block_update *update_state); void (*root_clock_control)(struct dc *dc, struct pg_block_update 
*update_state, bool power_on); void (*set_idle_state)(const struct dc *dc, bool allow_idle); @@ -452,17 +456,18 @@ void get_mpctree_visual_confirm_color( struct tg_color *color); void get_subvp_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); void get_mclk_switch_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); +void set_p_state_switch_method( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx); + void hwss_execute_sequence(struct dc *dc, struct block_sequence block_sequence[], int num_steps); @@ -472,7 +477,8 @@ void hwss_build_fast_sequence(struct dc *dc, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], int *num_steps, - struct pipe_ctx *pipe_ctx); + struct pipe_ctx *pipe_ctx, + struct dc_stream_status *stream_status); void hwss_send_dmcub_cmd(union block_sequence_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 82c5921668..b3c62a82cb 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -79,6 +79,7 @@ struct hwseq_private_funcs { void (*update_plane_addr)(const struct dc *dc, struct pipe_ctx *pipe_ctx); void (*plane_atomic_disconnect)(struct dc *dc, + struct dc_state *state, struct pipe_ctx *pipe_ctx); void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); bool (*set_input_transfer_func)(struct dc *dc, @@ -164,8 +165,15 @@ struct hwseq_private_funcs { void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + enum dc_status (*apply_single_controller_ctx_to_hw)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); #endif + void (*reset_back_end_for_pipe)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index bc9cda3294..3a6bf77a68 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -200,11 +200,7 @@ struct resource_funcs { unsigned int pipe_cnt, unsigned int index); - bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context, bool fast_update); - void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context); void (*get_panel_config_defaults)(struct dc_panel_config *panel_config); - void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); - void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx); }; @@ -385,6 +381,16 @@ union pipe_update_flags { uint32_t raw; }; +enum p_state_switch_method { + P_STATE_UNKNOWN = 0, + P_STATE_V_BLANK = 1, + P_STATE_FPO, + P_STATE_V_ACTIVE, + P_STATE_SUB_VP, + P_STATE_DRR_SUB_VP, + P_STATE_V_BLANK_SUB_VP +}; + struct pipe_ctx { struct dc_plane_state *plane_state; struct dc_stream_state *stream; @@ -433,6 +439,7 @@ struct pipe_ctx { struct dwbc *dwbc; struct mcif_wb *mcif_wb; union pipe_update_flags update_flags; + enum p_state_switch_method p_state_type; struct tg_color visual_confirm_color; bool 
has_vactive_margin; /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/ @@ -528,6 +535,14 @@ struct dc_state { * @stream_status: Planes status on a given stream */ struct dc_stream_status stream_status[MAX_PIPES]; + /** + * @phantom_streams: Stream state properties for phantoms + */ + struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES]; + /** + * @phantom_planes: Planes state properties for phantoms + */ + struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES]; /** * @stream_count: Total of streams in use @@ -536,6 +551,14 @@ uint8_t stream_mask; /** + * @phantom_stream_count: Total phantom streams in use + */ + uint8_t phantom_stream_count; + /** + * @phantom_plane_count: Total phantom planes in use + */ + uint8_t phantom_plane_count; + /** * @res_ctx: Persistent state of resources */ struct resource_context res_ctx; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index 9f521cf0fc..3f0161d646 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -36,7 +36,7 @@ struct abm { }; struct abm_funcs { - void (*abm_init)(struct abm *abm, uint32_t back_light); + void (*abm_init)(struct abm *abm, uint32_t back_light, uint32_t user_level); bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst); bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 55ded5fb8a..17e014d3bd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -62,6 +62,25 @@ struct dcn3_clk_internal { uint32_t CLK4_CLK0_CURRENT_CNT; //fclk }; +struct dcn35_clk_internal { + int dummy; + uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk + uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk + uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk + uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk + uint32_t CLK1_CLK4_CURRENT_CNT; //dtbclk + //uint32_t CLK1_CLK5_CURRENT_CNT; //dpiaclk + //uint32_t CLK1_CLK6_CURRENT_CNT; //srdbgclk + uint32_t CLK1_CLK3_DS_CNTL; //dcf_deep_sleep_divider + uint32_t CLK1_CLK3_ALLOW_DS; //dcf_deep_sleep_allow + + uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass + uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass + uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass + uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass + uint32_t CLK1_CLK4_BYPASS_CNTL; //dtbclk bypass +}; + struct dcn301_clk_internal { int dummy; uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 6b44557fcb..b9a06bf84c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -59,8 +59,8 @@ enum dentist_dispclk_change_mode { struct dp_dto_params { int otg_inst; enum signal_type signal; - long long pixclk_hz; - long long refclk_hz; + uint64_t pixclk_hz; + uint64_t refclk_hz; }; enum pixel_rate_div { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index 86b711dcc7..729ca0064e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -188,6 +188,10 @@ struct dwbc_funcs { bool (*is_enabled)( struct dwbc *dwbc); + void (*set_fc_enable)( + struct dwbc *dwbc, + enum dwb_frame_capture_enable enable); + + void (*set_stereo)(
struct dwbc *dwbc, struct dwb_stereo_params *stereo_params); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index b95ae9596c..dcae23faee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -43,6 +43,7 @@ * to be used inside loops and for determining array sizes. */ #define MAX_PIPES 6 +#define MAX_PHANTOM_PIPES (MAX_PIPES / 2) #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 7617fabbd1..0717920812 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -321,6 +321,9 @@ struct opp_funcs { bool (*dpg_is_blanked)( struct output_pixel_processor *opp); + bool (*dpg_is_pending)(struct output_pixel_processor *opp); + + void (*opp_dpg_set_blank_color)( struct output_pixel_processor *opp, const struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h index 660897e12a..e97d964a17 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -40,6 +40,7 @@ struct panel_cntl_backlight_registers { unsigned int BL_PWM_PERIOD_CNTL; unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; unsigned int PANEL_PWRSEQ_REF_DIV2; + unsigned int USER_LEVEL; }; struct panel_cntl_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h index b9812afb88..00ea3864dd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h @@ -47,8 +47,6 @@ struct pg_cntl_funcs { void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on); void (*init_pg_status)(struct pg_cntl *pg_cntl); - - void (*set_force_poweron_domain22)(struct pg_cntl *pg_cntl, bool power_on); }; #endif //__DC_PG_CNTL_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 9a00a99317..cad3e5f148 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -333,6 +333,7 @@ struct timing_generator_funcs { void (*init_odm)(struct timing_generator *tg); void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); + void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index d768536814..bf29fc58ea 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -281,11 +281,16 @@ struct link_service { const unsigned int *power_opts); bool (*edp_setup_replay)(struct dc_link *link, const struct dc_stream_state *stream); + bool (*edp_send_replay_cmd)(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data); bool (*edp_set_coasting_vtotal)( - struct dc_link *link, uint16_t coasting_vtotal); + struct dc_link *link, uint32_t coasting_vtotal); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm); + bool 
(*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal); bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 3d72443938..77a60aa9f2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -497,6 +497,18 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx( const struct resource_pool *pool); /* + * Look for a free pipe in new resource context that is used in current resource + * context as an OTG master pipe. + * + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* * Look for a free pipe in new resource context that is used as a secondary DPP * pipe in any MPCC combine in current resource context. * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise @@ -557,9 +569,6 @@ void update_audio_usage( unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); -void get_audio_check(struct audio_info *aud_modes, - struct audio_check *aud_chk); - bool get_temp_dp_link_res(struct dc_link *link, struct link_resource *link_res, struct dc_link_settings *link_settings); @@ -606,5 +615,4 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, struct pipe_ctx *pipe_ctx); bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); - #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 007ee32c20..3cbfbf8d10 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -1279,86 +1279,6 @@ static void remove_stream_from_alloc_table( } } -static enum dc_status deallocate_mst_payload_with_temp_drm_wa( - struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct dc_dp_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); - int i; - bool mst_mode = (link->type == dc_connection_mst_branch); - /* adjust for drm changes*/ - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - const struct dc_link_settings empty_link_settings = {0}; - DC_LOGGER_INIT(link->ctx->logger); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &empty_link_settings, - avg_time_slots_per_mtp); - - if (dm_helpers_dp_mst_write_payload_allocation_table( - stream->ctx, - stream, - &proposed_table, - false)) - update_mst_stream_alloc_table( - link, - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc, - &proposed_table); - else - DC_LOG_WARNING("Failed to update" - "MST allocation table for" - "pipe idx:%d\n", - pipe_ctx->pipe_idx); - - DC_LOG_MST("%s" - "stream_count: %d: ", - __func__, - link->mst_stream_alloc_table.stream_count); - - for (i = 0; i < MAX_CONTROLLER_NUM; 
i++) { - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].hpo_dp_stream_enc: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); - } - - if (link_hwss->ext.update_stream_allocation_table == NULL || - link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { - DC_LOG_DEBUG("Unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } - - link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, - &link->mst_stream_alloc_table); - - if (mst_mode) { - dm_helpers_dp_mst_poll_for_allocation_change_trigger( - stream->ctx, - stream); - } - - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - false); - - return DC_OK; -} - static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; @@ -1371,9 +1291,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) const struct dc_link_settings empty_link_settings = {0}; DC_LOGGER_INIT(link->ctx->logger); - if (link->dc->debug.temp_mst_deallocation_sequence) - return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx); - /* deallocate_mst_payload is called before disable link. When mode or * disable/enable monitor, new stream is created which is not in link * stream[] yet. For this, payload is not allocated yet, so de-alloc @@ -1446,16 +1363,14 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &link->mst_stream_alloc_table); - if (mst_mode) { + if (mst_mode) dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - false); - } + dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + stream->ctx, + stream); return DC_OK; } @@ -1536,12 +1451,10 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) stream->ctx, stream); - if (ret != ACT_LINK_LOST) { + if (ret != ACT_LINK_LOST) dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); - } + stream); /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); @@ -1801,8 +1714,7 @@ enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); + stream); /* notify immediate branch device table update */ if (dm_helpers_dp_mst_write_payload_allocation_table( @@ -1931,8 +1843,7 @@ enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); + stream); } /* increase throttled vcp size */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 41f8230f94..cf22b8f28b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -213,8 +213,10 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s link_srv->edp_get_replay_state = edp_get_replay_state; 
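The assignments being extended here follow DC's link-service convention: a construct_*() helper fills a table of function pointers once, and every caller dispatches through link_srv instead of binding to an implementation. A minimal sketch of that pattern with two of the eDP replay hooks; all types and bodies are stand-ins (note the real patch also widens coasting_vtotal from uint16_t to uint32_t):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dc_link;	/* opaque here */

struct link_service {
	bool (*edp_setup_replay)(struct dc_link *link);
	bool (*edp_set_coasting_vtotal)(struct dc_link *link,
					uint32_t coasting_vtotal);
};

static bool edp_setup_replay_impl(struct dc_link *link)
{
	(void)link;
	return true;
}

static bool edp_set_coasting_vtotal_impl(struct dc_link *link,
					 uint32_t coasting_vtotal)
{
	(void)link;
	printf("coasting vtotal = %u\n", (unsigned)coasting_vtotal);
	return true;
}

/* One-time registration, mirroring the construct function above. */
static void construct_edp_panel_control(struct link_service *link_srv)
{
	link_srv->edp_setup_replay = edp_setup_replay_impl;
	link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal_impl;
}

int main(void)
{
	struct link_service link_srv = {0};

	construct_edp_panel_control(&link_srv);
	link_srv.edp_setup_replay(NULL);
	link_srv.edp_set_coasting_vtotal(NULL, 2200);	/* dispatch via the table */
	return 0;
}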
link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active; link_srv->edp_setup_replay = edp_setup_replay; + link_srv->edp_send_replay_cmd = edp_send_replay_cmd; link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal; link_srv->edp_replay_residency = edp_replay_residency; + link_srv->edp_set_replay_power_opt_and_coasting_vtotal = edp_set_replay_power_opt_and_coasting_vtotal; link_srv->edp_wait_for_t12 = edp_wait_for_t12; link_srv->edp_is_ilr_optimization_required = diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h index 4a954317d0..595fb05946 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -25,6 +25,7 @@ #ifndef __LINK_VALIDATION_H__ #define __LINK_VALIDATION_H__ #include "link.h" + enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 2f11eaabbe..289f5d1333 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -412,12 +412,18 @@ static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) { enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN; - if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) + if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) { cable_max_link_rate = LINK_RATE_UHBR20; - else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) + } else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) { cable_max_link_rate = LINK_RATE_UHBR13_5; - else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) - cable_max_link_rate = LINK_RATE_UHBR10; + } else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) { + // allow DP40 cables to do UHBR13.5 for passive or unknown cable type + if (link->dpcd_caps.cable_id.bits.CABLE_TYPE < 2) { + cable_max_link_rate = LINK_RATE_UHBR13_5; + } else { + cable_max_link_rate = LINK_RATE_UHBR10; + } + } return cable_max_link_rate; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 982eda3c46..6af42ba988 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -82,25 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link) { union dmub_rb_cmd cmd = {0}; struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; - bool is_hpd_high = false; /* prepare QUERY_HPD command */ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; - /* Return HPD status reported by DMUB if query successfully executed. 
*/ - if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && - cmd.query_hpd.data.status == AUX_RET_SUCCESS) - is_hpd_high = cmd.query_hpd.data.result; - - DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", - __func__, - link->link_index, - link->link_id.enum_id - ENUM_ID_1, - cmd.query_hpd.data.status, - cmd.query_hpd.data.result); - - return is_hpd_high; + /* Query dpia hpd status from dmub */ + if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && + cmd.query_hpd.data.status == AUX_RET_SUCCESS) { + DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + link->hpd_status, + cmd.query_hpd.data.result); + link->hpd_status = cmd.query_hpd.data.result; + } else { + DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + cmd.query_hpd.data.status, + link->hpd_status); + link->hpd_status = false; + } + + return link->hpd_status; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 9eadc2c7f2..ba69874be5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -265,7 +265,7 @@ void dp_handle_link_loss(struct dc_link *link) for (i = count - 1; i >= 0; i--) { // Always use max settings here for DP 1.4a LL Compliance CTS - if (link->is_automated) { + if (link->skip_fallback_on_link_loss) { pipes[i]->link_config.dp_link_settings.lane_count = link->verified_link_cap.lane_count; pipes[i]->link_config.dp_link_settings.link_rate = @@ -404,7 +404,9 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC - link->is_automated = true; + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->skip_fallback_on_link_loss = true; + device_service_clear.bits.AUTOMATED_TEST = 1; core_link_write_dpcd( link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index 4f4e899e5c..5d36bab002 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent( uint32_t retries_eq = 0; enum dc_status status; enum dc_dp_training_pattern tr_pattern; - uint32_t wait_time_microsec; + uint32_t wait_time_microsec = 0; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; @@ -811,7 +811,7 @@ static enum link_training_result dpia_training_eq_transparent( /* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4 * has to share encoders unlike DP and USBC */ - if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) { + if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->skip_fallback_on_link_loss && retries_eq)) { result = LINK_TRAINING_SUCCESS; break; } @@ -1037,7 
+1037,7 @@ enum link_training_result dpia_perform_link_training( */ if (result == LINK_TRAINING_SUCCESS) { fsleep(5000); - if (!link->is_automated) + if (!link->skip_fallback_on_link_loss) result = dp_check_link_loss_status(link, &lt_settings); } else if (result == LINK_TRAINING_ABORT) dpia_training_abort(link, &lt_settings, repeater_id); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 68096d12f5..7087cdc9e9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -205,6 +205,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -293,6 +294,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_dpmf[0], + sizeof(vendor_lttpr_write_data_dpmf)); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); @@ -552,6 +557,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -639,6 +645,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_dpmf[0], + sizeof(vendor_lttpr_write_data_dpmf)); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 7c4a93d3cd..d01b77fb98 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -529,6 +529,9 @@ bool edp_set_backlight_level(const struct dc_link *link, if (dc_is_embedded_signal(link->connector_signal)) { struct pipe_ctx *pipe_ctx = get_pipe_from_link(link); + if (link->panel_cntl) + link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16; + if (pipe_ctx) { /* Disable brightness ramping when the display is blanked * as it can hang the DMCU @@ -1001,7 +1004,37 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream return
true; } -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) +/* + * This is a general interface for Replay to set a 32-bit variable to DMUB. + * replay_FW_Message_type: Indicates which instruction or variable is passed to DMUB. + * cmd_data: Value of the config. + */ +bool edp_send_replay_cmd(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!replay) + return false; + + DC_LOGGER_INIT(link->ctx->logger); + + if (dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + cmd_data->panel_inst = panel_inst; + else { + DC_LOG_DC("%s(): failed to get edp panel inst", __func__); + return false; + } + + replay->funcs->replay_send_cmd(replay, msg, cmd_data); + + return true; +} + +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1039,6 +1072,33 @@ bool edp_replay_residency(const struct dc_link *link, return true; } +bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + /* Only when both the power opt and the coasting vtotal change can this function return true */ + if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts && + coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { + if (link->replay_settings.replay_feature_enabled && + replay->funcs->replay_set_power_opt_and_coasting_vtotal) { + replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay, + *power_opts, panel_inst, coasting_vtotal); + link->replay_settings.replay_power_opt_active = *power_opts; + link->replay_settings.coasting_vtotal = coasting_vtotal; + } else + return false; + } else + return false; + + return true; +} + static struct abm *get_abm_from_stream_res(const struct dc_link *link) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index a034288ad7..a158c6234d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -56,10 +56,15 @@ bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable, bool wait, bool force_static, const unsigned int *power_opts); bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream); -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal); +bool edp_send_replay_cmd(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data); +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); bool edp_replay_residency(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); +bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing);
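A note on the combined setter added above (not part of the patch): it programs DMUB only when both the power option and the coasting vtotal differ from the values cached in link->replay_settings, and returns false otherwise. A minimal caller sketch under that assumption; the wrapper function below is hypothetical, while edp_set_replay_power_opt_and_coasting_vtotal() and edp_set_coasting_vtotal() are the interfaces declared above:

static void replay_update_power_and_vtotal(struct dc_link *link,
					   unsigned int new_power_opt,
					   uint32_t new_vtotal)
{
	/* Returns true only when both cached values actually change,
	 * giving a single atomic DMUB update. */
	if (!edp_set_replay_power_opt_and_coasting_vtotal(link,
							  &new_power_opt,
							  new_vtotal))
		/* Otherwise fall back to the existing single-field setter. */
		edp_set_coasting_vtotal(link, new_vtotal);
}

diff --git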
a/drivers/gpu/drm/amd/display/dc/optc/Makefile b/drivers/gpu/drm/amd/display/dc/optc/Makefile new file mode 100644 index 0000000000..bb213335fb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/Makefile @@ -0,0 +1,108 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'optc' sub-component of DAL. +# + + +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +OPTC_DCN10 = dcn10_optc.o + +AMD_DAL_OPTC_DCN10 = $(addprefix $(AMDDALPATH)/dc/optc/dcn10/,$(OPTC_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN10) + +############################################################################### + +OPTC_DCN20 = dcn20_optc.o + +AMD_DAL_OPTC_DCN20 = $(addprefix $(AMDDALPATH)/dc/optc/dcn20/,$(OPTC_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN20) + +############################################################################### + +OPTC_DCN201 = dcn201_optc.o + +AMD_DAL_OPTC_DCN201 = $(addprefix $(AMDDALPATH)/dc/optc/dcn201/,$(OPTC_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN201) + +############################################################################### + +############################################################################### + +############################################################################### + +OPTC_DCN30 = dcn30_optc.o + +AMD_DAL_OPTC_DCN30 = $(addprefix $(AMDDALPATH)/dc/optc/dcn30/,$(OPTC_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN30) + +############################################################################### + +OPTC_DCN301 = dcn301_optc.o + +AMD_DAL_OPTC_DCN301 = $(addprefix $(AMDDALPATH)/dc/optc/dcn301/,$(OPTC_DCN301)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN301) + +############################################################################### + +OPTC_DCN31 = dcn31_optc.o + +AMD_DAL_OPTC_DCN31 = $(addprefix $(AMDDALPATH)/dc/optc/dcn31/,$(OPTC_DCN31)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN31) + +############################################################################### + +OPTC_DCN314 = dcn314_optc.o + +AMD_DAL_OPTC_DCN314 = $(addprefix $(AMDDALPATH)/dc/optc/dcn314/,$(OPTC_DCN314)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN314) + +############################################################################### + +OPTC_DCN32 = dcn32_optc.o + +AMD_DAL_OPTC_DCN32 = $(addprefix 
$(AMDDALPATH)/dc/optc/dcn32/,$(OPTC_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN32) + +############################################################################### + +OPTC_DCN35 = dcn35_optc.o + +AMD_DAL_OPTC_DCN35 = $(addprefix $(AMDDALPATH)/dc/optc/dcn35/,$(OPTC_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN35) + +############################################################################### + +############################################################################### +endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c index 0e8f4f36c8..0e8f4f36c8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index ab81594a7f..6c2e84d396 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -557,7 +557,8 @@ struct dcn_optc_registers { type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ type OTG_CRC_DATA_FORMAT;\ type OTG_V_TOTAL_LAST_USED_BY_DRR;\ - type OTG_DRR_TIMING_DBUF_UPDATE_PENDING; + type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\ + type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING; #define TG_REG_FIELD_LIST_DCN3_2(type) \ type OTG_H_TIMING_DIV_MODE_MANUAL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c index 58bdbd859b..58bdbd859b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h index f7968b9ca1..c2e03ced39 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h @@ -26,7 +26,7 @@ #ifndef __DC_OPTC_DCN20_H__ #define __DC_OPTC_DCN20_H__ -#include "../dcn10/dcn10_optc.h" +#include "dcn10/dcn10_optc.h" #define TG_COMMON_REG_LIST_DCN2_0(inst) \ TG_COMMON_REG_LIST_DCN(inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c index 70fcbec03f..70fcbec03f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h index e9545b7351..e9545b7351 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c index b97bdb868a..b97bdb868a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h index d3a056c12b..d3a056c12b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c index b3cfcb8879..b3cfcb8879 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c 
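An aside on the dcn10_optc.h hunk above: the register field list in that header is an X-macro, expanded once with a narrow type for the shift struct and once with a wide type for the mask struct, so the single new OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING entry defines the field in both places at once. A self-contained sketch of the pattern, using generic names rather than the real DC types:

#include <stdint.h>
#include <stdio.h>

/* One field list, stamped out with a different storage type per use. */
#define TG_FIELD_LIST(type) \
	type OTG_DRR_TIMING_DBUF_UPDATE_PENDING; \
	type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;

struct tg_shift { TG_FIELD_LIST(uint8_t) };   /* bit positions */
struct tg_mask  { TG_FIELD_LIST(uint32_t) };  /* bit masks */

int main(void)
{
	/* Adding one line to TG_FIELD_LIST adds the field to both structs. */
	struct tg_shift shift = { .OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING = 1 };
	struct tg_mask mask = { .OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING = 0x2 };

	printf("shift=%u mask=0x%x\n",
	       (unsigned)shift.OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING,
	       (unsigned)mask.OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING);
	return 0;
}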
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h index b49585682a..b49585682a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c index 63a677c8ee..63a677c8ee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h index 30b81a448c..30b81a448c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c index 0086cafb0f..0086cafb0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h index 99c098e761..99c098e761 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index 8234935433..52eab8fccb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi } } +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000); +} + void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -260,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc) OTG_V_TOTAL_MAX_SEL, 1, OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); } } @@ -345,6 +349,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = { .set_odm_bypass = optc32_set_odm_bypass, .set_odm_combine = optc32_set_odm_combine, .get_odm_combine_segments = optc32_get_odm_combine_segments, + .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear, .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode, .get_optc_source = optc2_get_optc_source, .set_out_mux = optc3_set_out_mux, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h index 8ce3b178ca..0c2c146955 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h @@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments); void optc32_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg); #endif /* __DC_OPTC_DCN32_H__ */ 
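For reference, optc32_wait_odm_doublebuffer_pending_clear() above is a single REG_WAIT; assuming DC's usual REG_WAIT(reg, field, value, delay_us, max_try) convention, it polls the pending flag until it reads 0, sleeping 2 us between polls for at most 50000 tries (roughly 100 ms). A hedged, open-coded equivalent, where read_reg(), udelay() and the MASK/SHIFT constants are placeholders for the real helpers:

#include <stdint.h>
#include <stdbool.h>

/* Stand-ins for the real DC register helpers and constants. */
#define OTG_DOUBLE_BUFFER_CONTROL 0x0  /* placeholder register address */
#define PENDING_MASK  0x2u             /* placeholder field mask */
#define PENDING_SHIFT 1                /* placeholder field shift */
extern uint32_t read_reg(uint32_t addr);  /* placeholder MMIO read */
extern void udelay(unsigned int us);      /* placeholder busy-wait */

static bool wait_div_mode_db_update_clear(void)
{
	unsigned int tries;

	for (tries = 0; tries < 50000; tries++) {
		uint32_t v = read_reg(OTG_DOUBLE_BUFFER_CONTROL);

		/* Extract the field, as the REG_WAIT macro does internally. */
		if (((v & PENDING_MASK) >> PENDING_SHIFT) == 0)
			return true;  /* double-buffered ODM update has latched */

		udelay(2);            /* 2 us between polls, per the call above */
	}

	return false;                 /* timed out; DC would log a warning here */
}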
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index 5b15475088..5b15475088 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h index 1f422e4c46..1f422e4c46 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile new file mode 100644 index 0000000000..0a75ed8962 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile @@ -0,0 +1,199 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'resource' sub-component of DAL. 
+# + + +############################################################################### +# DCE +############################################################################### + +RESOURCE_DCE100 = dce100_resource.o + +AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE100) + +############################################################################### + +RESOURCE_DCE110 = dce110_resource.o + +AMD_DAL_RESOURCE_DCE110 = $(addprefix $(AMDDALPATH)/dc/resource/dce110/,$(RESOURCE_DCE110)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE110) + +############################################################################### + +RESOURCE_DCE112 = dce112_resource.o + +AMD_DAL_RESOURCE_DCE112 = $(addprefix $(AMDDALPATH)/dc/resource/dce112/,$(RESOURCE_DCE112)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE112) + +############################################################################### + +RESOURCE_DCE120 = dce120_resource.o + +AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOURCE_DCE120)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120) + +############################################################################### + +RESOURCE_DCE80 = dce80_resource.o + +AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80) + +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +RESOURCE_DCN10 = dcn10_resource.o + +AMD_DAL_RESOURCE_DCN10 = $(addprefix $(AMDDALPATH)/dc/resource/dcn10/,$(RESOURCE_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN10) + +############################################################################### + +RESOURCE_DCN20 = dcn20_resource.o + +AMD_DAL_RESOURCE_DCN20 = $(addprefix $(AMDDALPATH)/dc/resource/dcn20/,$(RESOURCE_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN20) + +############################################################################### + +RESOURCE_DCN201 = dcn201_resource.o + +AMD_DAL_RESOURCE_DCN201 = $(addprefix $(AMDDALPATH)/dc/resource/dcn201/,$(RESOURCE_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN201) + +############################################################################### + +RESOURCE_DCN21 = dcn21_resource.o + +AMD_DAL_RESOURCE_DCN21 = $(addprefix $(AMDDALPATH)/dc/resource/dcn21/,$(RESOURCE_DCN21)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21) + +############################################################################### + +############################################################################### + +############################################################################### + +RESOURCE_DCN30 = dcn30_resource.o + +AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN30) + +############################################################################### + +RESOURCE_DCN301 = dcn301_resource.o + +AMD_DAL_RESOURCE_DCN301 = $(addprefix $(AMDDALPATH)/dc/resource/dcn301/,$(RESOURCE_DCN301)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN301) + +############################################################################### + +RESOURCE_DCN302 = dcn302_resource.o + +AMD_DAL_RESOURCE_DCN302 = $(addprefix $(AMDDALPATH)/dc/resource/dcn302/,$(RESOURCE_DCN302)) + +AMD_DISPLAY_FILES += 
$(AMD_DAL_RESOURCE_DCN302) + +############################################################################### + +RESOURCE_DCN303 = dcn303_resource.o + +AMD_DAL_RESOURCE_DCN303 = $(addprefix $(AMDDALPATH)/dc/resource/dcn303/,$(RESOURCE_DCN303)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN303) + +############################################################################### + +RESOURCE_DCN31 = dcn31_resource.o + +AMD_DAL_RESOURCE_DCN31 = $(addprefix $(AMDDALPATH)/dc/resource/dcn31/,$(RESOURCE_DCN31)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN31) + +############################################################################### + +RESOURCE_DCN314 = dcn314_resource.o + +AMD_DAL_RESOURCE_DCN314 = $(addprefix $(AMDDALPATH)/dc/resource/dcn314/,$(RESOURCE_DCN314)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN314) + +############################################################################### + +RESOURCE_DCN315 = dcn315_resource.o + +AMD_DAL_RESOURCE_DCN315 = $(addprefix $(AMDDALPATH)/dc/resource/dcn315/,$(RESOURCE_DCN315)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN315) + +############################################################################### + +RESOURCE_DCN316 = dcn316_resource.o + +AMD_DAL_RESOURCE_DCN316 = $(addprefix $(AMDDALPATH)/dc/resource/dcn316/,$(RESOURCE_DCN316)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN316) + +############################################################################### + +RESOURCE_DCN32 = dcn32_resource.o + +AMD_DAL_RESOURCE_DCN32 = $(addprefix $(AMDDALPATH)/dc/resource/dcn32/,$(RESOURCE_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN32) + +############################################################################### + +RESOURCE_DCN321 = dcn321_resource.o + +AMD_DAL_RESOURCE_DCN321 = $(addprefix $(AMDDALPATH)/dc/resource/dcn321/,$(RESOURCE_DCN321)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN321) + +############################################################################### + +RESOURCE_DCN35 = dcn35_resource.o + +AMD_DAL_RESOURCE_DCN35 = $(addprefix $(AMDDALPATH)/dc/resource/dcn35/,$(RESOURCE_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN35) + +############################################################################### + +############################################################################### + +endif diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index 53a5f4cb64..53a5f4cb64 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h index fecab7c560..fecab7c560 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index fe518fd27b..fe518fd27b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h index aa4531e080..aa4531e080 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h diff 
--git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index d1edac46c9..d1edac46c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h index 1f57ebc6f9..1f57ebc6f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 962de79be1..20662edd0a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -36,7 +36,7 @@ #include "dce110/dce110_resource.h" #include "virtual/virtual_stream_encoder.h" -#include "dce120_timing_generator.h" +#include "dce120/dce120_timing_generator.h" #include "irq/dce120/irq_service_dce120.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h index 3d1f3cf012..3d1f3cf012 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt new file mode 100644 index 0000000000..19dd73bc9a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dce80_resource.c + dce80_resource.h + )
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 35a2cce0c2..35a2cce0c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h index eff31ab83a..eff31ab83a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index b94c5c97ee..d08d109692 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -26,29 +26,32 @@ #include "dm_services.h" #include "dc.h" -#include "dcn10_init.h" +#include "dcn10/dcn10_init.h" #include "resource.h" #include "include/irq_service_interface.h" -#include "dcn10_resource.h" -#include "dcn10_ipp.h" -#include "dcn10_mpc.h" +#include "dcn10/dcn10_resource.h" +#include "dcn10/dcn10_ipp.h" +#include "dcn10/dcn10_mpc.h" + +#include "dcn10/dcn10_dwb.h" + #include "irq/dcn10/irq_service_dcn10.h" -#include "dcn10_dpp.h" -#include "dcn10_optc.h" +#include "dcn10/dcn10_dpp.h" +#include "dcn10/dcn10_optc.h" #include "dcn10/dcn10_hwseq.h" #include "dce110/dce110_hwseq.h" -#include "dcn10_opp.h" -#include "dcn10_link_encoder.h" -#include "dcn10_stream_encoder.h" +#include "dcn10/dcn10_opp.h" +#include "dcn10/dcn10_link_encoder.h" +#include "dcn10/dcn10_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" -#include "dcn10_hubp.h" -#include "dcn10_hubbub.h" +#include "dcn10/dcn10_hubp.h" +#include "dcn10/dcn10_hubbub.h" #include "dce/dce_panel_cntl.h" #include "soc15_hw_ip.h" @@ -1247,7 +1250,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( /* Store first available for MST second display * in daisy chain use case */ - j = i; + + if (pool->stream_enc[i]->id != ENGINE_ID_VIRTUAL) + j = i; + if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h index bf8e33cd81..bf8e33cd81 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index e73e597548..f9c5bc624b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -29,7 +29,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn20_init.h" +#include "dcn20/dcn20_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -39,29 +39,29 @@ #include "dcn10/dcn10_hubp.h" #include "dcn10/dcn10_ipp.h" -#include "dcn20_hubbub.h" -#include "dcn20_mpc.h" -#include "dcn20_hubp.h" +#include "dcn20/dcn20_hubbub.h" +#include "dcn20/dcn20_mpc.h" +#include "dcn20/dcn20_hubp.h" #include "irq/dcn20/irq_service_dcn20.h" -#include 
"dcn20_dpp.h" -#include "dcn20_optc.h" +#include "dcn20/dcn20_dpp.h" +#include "dcn20/dcn20_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dce110/dce110_hwseq.h" #include "dcn10/dcn10_resource.h" -#include "dcn20_opp.h" +#include "dcn20/dcn20_opp.h" -#include "dcn20_dsc.h" +#include "dcn20/dcn20_dsc.h" -#include "dcn20_link_encoder.h" -#include "dcn20_stream_encoder.h" +#include "dcn20/dcn20_link_encoder.h" +#include "dcn20/dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" -#include "dcn20_dccg.h" -#include "dcn20_vmid.h" +#include "dcn20/dcn20_dccg.h" +#include "dcn20/dcn20_vmid.h" #include "dce/dce_panel_cntl.h" #include "navi10_ip_offset.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h index 4cee3fa11a..4cee3fa11a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index bca22d8676..914b234d7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -26,7 +26,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn201_init.h" +#include "dcn201/dcn201_init.h" #include "dml/dcn20/dcn20_fpu.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -36,16 +36,16 @@ #include "dcn10/dcn10_hubp.h" #include "dcn10/dcn10_ipp.h" -#include "dcn201_mpc.h" -#include "dcn201_hubp.h" +#include "dcn201/dcn201_mpc.h" +#include "dcn201/dcn201_hubp.h" #include "irq/dcn201/irq_service_dcn201.h" #include "dcn201/dcn201_dpp.h" #include "dcn201/dcn201_hubbub.h" -#include "dcn201_dccg.h" -#include "dcn201_optc.h" +#include "dcn201/dcn201_dccg.h" +#include "dcn201/dcn201_optc.h" #include "dcn201/dcn201_hwseq.h" #include "dce110/dce110_hwseq.h" -#include "dcn201_opp.h" +#include "dcn201/dcn201_opp.h" #include "dcn201/dcn201_link_encoder.h" #include "dcn20/dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" @@ -55,7 +55,7 @@ #include "dce110/dce110_resource.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" -#include "dcn201_hubbub.h" +#include "dcn201/dcn201_hubbub.h" #include "dcn10/dcn10_resource.h" #include "cyan_skillfish_ip_offset.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h index e0467d17d4..e0467d17d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 42277b2805..65d337731f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -29,7 +29,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn21_init.h" +#include "dcn21/dcn21_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -44,7 +44,7 @@ #include "dcn20/dcn20_hubbub.h" #include "dcn20/dcn20_mpc.h" #include "dcn20/dcn20_hubp.h" -#include "dcn21_hubp.h" +#include "dcn21/dcn21_hubp.h" #include "irq/dcn21/irq_service_dcn21.h" #include 
"dcn20/dcn20_dpp.h" #include "dcn20/dcn20_optc.h" @@ -61,7 +61,7 @@ #include "dml/display_mode_vba.h" #include "dcn20/dcn20_dccg.h" #include "dcn21/dcn21_dccg.h" -#include "dcn21_hubbub.h" +#include "dcn21/dcn21_hubbub.h" #include "dcn10/dcn10_resource.h" #include "dce/dce_panel_cntl.h" @@ -713,9 +713,8 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) pool->base.hubps[i] = NULL; } - if (pool->base.irqs != NULL) { + if (pool->base.irqs != NULL) dal_irq_service_destroy(&pool->base.irqs); - } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h index f7ecc002c2..f7ecc002c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index 7b259cb5f4..37a64186f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -27,7 +27,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn30_init.h" +#include "dcn30/dcn30_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -1682,6 +1682,7 @@ noinline bool dcn30_internal_validate_bw( * We don't actually support prefetch mode 2, so require that we * at least support prefetch mode 1. */ + context->bw_ctx.dml.validate_max_state = fast_validate; context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = dm_allow_self_refresh; @@ -1691,6 +1692,7 @@ noinline bool dcn30_internal_validate_bw( memset(merge, 0, sizeof(merge)); vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); } + context->bw_ctx.dml.validate_max_state = false; } dml_log_mode_support_params(&context->bw_ctx.dml); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h index 8e6b8b7368..8e6b8b7368 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index ce04caf35d..7538b548c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -27,7 +27,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn301_init.h" +#include "dcn301/dcn301_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -61,7 +61,7 @@ #include "dcn10/dcn10_resource.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn301/dcn301_dio_link_encoder.h" -#include "dcn301_panel_cntl.h" +#include "dcn301/dcn301_panel_cntl.h" #include "vangogh_ip_offset.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h index ae8672680c..ae8672680c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 63ac984a04..5791b5cc28 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -23,9 +23,9 @@ * */ -#include "dcn302_init.h" +#include "dcn302/dcn302_init.h" #include "dcn302_resource.h" -#include "dcn302_dccg.h" +#include "dcn302/dcn302_dccg.h" #include "irq/dcn302/irq_service_dcn302.h" #include "dcn30/dcn30_dio_link_encoder.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h index 9f24e73b92..9f24e73b92 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 49cb7fde41..25cd6236b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -23,9 +23,9 @@ * Authors: AMD */ -#include "dcn303_init.h" +#include "dcn303/dcn303_init.h" #include "dcn303_resource.h" -#include "dcn303_dccg.h" +#include "dcn303/dcn303_dccg.h" #include "irq/dcn303/irq_service_dcn303.h" #include "dcn30/dcn30_dio_link_encoder.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h index 37cf152582..37cf152582 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 79416cfb22..31035fc3d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -70,7 +70,7 @@ #include "dml/dcn31/dcn31_fpu.h" #include "dcn31/dcn31_dccg.h" #include "dcn10/dcn10_resource.h" -#include "dcn31_panel_cntl.h" +#include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" #include "dcn30/dcn30_mmhubbub.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index 901436591e..901436591e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index c97391edb5..c97391edb5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h index 49ffe71018..49ffe71018 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index cb8024eee8..515ba435f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1631,8 +1631,10 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) int i; struct resource_context *res_ctx = &context->res_ctx; - /*Don't apply for single stream*/ - if (context->stream_count < 2) + /* Only apply for dual stream scenarios with edp*/ + if 
(context->stream_count != 2) + return false; + if (context->streams[0]->signal != SIGNAL_TYPE_EDP && context->streams[1]->signal != SIGNAL_TYPE_EDP) return false; for (i = 0; i < dc->res_pool->pipe_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h index 22849eaa6f..22849eaa6f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index b9753d4606..b9753d4606 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h index aba6d63413..aba6d63413 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index f663de1cdc..9042378fa8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -27,7 +27,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn32_init.h" +#include "dcn32/dcn32_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -41,7 +41,7 @@ #include "dcn31/dcn31_hubbub.h" #include "dcn32/dcn32_hubbub.h" #include "dcn32/dcn32_mpc.h" -#include "dcn32_hubp.h" +#include "dcn32/dcn32_hubp.h" #include "irq/dcn32/irq_service_dcn32.h" #include "dcn32/dcn32_dpp.h" #include "dcn32/dcn32_optc.h" @@ -89,6 +89,8 @@ #include "dcn20/dcn20_vmid.h" #include "dml/dcn32/dcn32_fpu.h" +#include "dc_state_priv.h" + #include "dml2/dml2_wrapper.h" #define DC_LOGGER_INIT(logger) @@ -1644,7 +1646,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc, if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) phantom_plane = prev_phantom_plane; else - phantom_plane = dc_create_plane_state(dc); + phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state); memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, @@ -1665,9 +1667,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc, phantom_plane->clip_rect.y = 0; phantom_plane->clip_rect.height = phantom_stream->src.height; - phantom_plane->is_phantom = true; - - dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context); + dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context); curr_pipe = curr_pipe->bottom_pipe; prev_phantom_plane = phantom_plane; @@ -1683,13 +1683,7 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc, struct dc_stream_state *phantom_stream = NULL; struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; - phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); - phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; - phantom_stream->dpms_off = true; - phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; - phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; - ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; - 
ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream); /* stream has limited viewport and small timing */ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); @@ -1699,81 +1693,10 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc, dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx); DC_FP_END(); - dc_add_stream_to_ctx(dc, context, phantom_stream); + dc_state_add_phantom_stream(dc, context, phantom_stream, ref_pipe->stream); return phantom_stream; } -void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context) -{ - int i; - struct dc_plane_state *phantom_plane = NULL; - struct dc_stream_state *phantom_stream = NULL; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (resource_is_pipe_type(pipe, OTG_MASTER) && - resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; - phantom_stream = pipe->stream; - - dc_plane_state_retain(phantom_plane); - dc_stream_retain(phantom_stream); - } - } -} - -// return true if removed piped from ctx, false otherwise -bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update) -{ - int i; - bool removed_pipe = false; - struct dc_plane_state *phantom_plane = NULL; - struct dc_stream_state *phantom_stream = NULL; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - // build scaling params for phantom pipes - if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; - phantom_stream = pipe->stream; - - dc_rem_all_planes_for_stream(dc, pipe->stream, context); - dc_remove_stream_from_ctx(dc, context, pipe->stream); - - /* Ref count is incremented on allocation and also when added to the context. - * Therefore we must call release for the the phantom plane and stream once - * they are removed from the ctx to finally decrement the refcount to 0 to free. - */ - dc_plane_state_release(phantom_plane); - dc_stream_release(phantom_stream); - - removed_pipe = true; - } - - /* For non-full updates, a shallow copy of the current state - * is created. In this case we don't want to erase the current - * state (there can be 2 HIRQL threads, one in flip, and one in - * checkMPO) that can cause a race condition. - * - * This is just a workaround, needs a proper fix. - */ - if (!fast_update) { - // Clear all phantom stream info - if (pipe->stream) { - pipe->stream->mall_stream_config.type = SUBVP_NONE; - pipe->stream->mall_stream_config.paired_stream = NULL; - } - - if (pipe->plane_state) { - pipe->plane_state->is_phantom = false; - } - } - } - return removed_pipe; -} - /* TODO: Input to this function should indicate which pipe indexes (or streams) * require a phantom pipe / stream */ @@ -1798,7 +1721,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, // We determine which phantom pipes were added by comparing with // the phantom stream. 
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { pipe->stream->use_dynamic_meta = false; pipe->plane_state->flip_immediate = false; if (!resource_build_scaling_params(pipe)) { @@ -1817,7 +1740,6 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val int vlevel = 0; int pipe_cnt = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); - struct mall_temp_config mall_temp_config; /* To handle Freesync properly, setting FreeSync DML parameters * to its default state for the first stage of validation @@ -1827,29 +1749,12 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val DC_LOGGER_INIT(dc->ctx->logger); - /* For fast validation, there are situations where a shallow copy of - * of the dc->current_state is created for the validation. In this case - * we want to save and restore the mall config because we always - * teardown subvp at the beginning of validation (and don't attempt - * to add it back if it's fast validation). If we don't restore the - * subvp config in cases of fast validation + shallow copy of the - * dc->current_state, the dc->current_state will have a partially - * removed subvp state when we did not intend to remove it. - */ - if (fast_validate) { - memset(&mall_temp_config, 0, sizeof(mall_temp_config)); - dcn32_save_mall_state(dc, context, &mall_temp_config); - } - BW_VAL_TRACE_COUNT(); DC_FP_START(); out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); DC_FP_END(); - if (fast_validate) - dcn32_restore_mall_state(dc, context, &mall_temp_config); - if (pipe_cnt == 0) goto validate_out; @@ -1948,7 +1853,7 @@ int dcn32_populate_dml_pipes_from_context( * This is just a workaround -- needs a proper fix. 
*/ if (!fast_validate) { - switch (pipe->stream->mall_stream_config.type) { + switch (dc_state_get_pipe_subvp_type(context, pipe)) { case SUBVP_MAIN: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; subvp_in_use = true; @@ -2054,10 +1959,6 @@ static struct resource_funcs dcn32_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, - .remove_phantom_pipes = dcn32_remove_phantom_pipes, - .retain_phantom_pipes = dcn32_retain_phantom_pipes, - .save_mall_state = dcn32_save_mall_state, - .restore_mall_state = dcn32_restore_mall_state, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, }; @@ -2471,16 +2372,19 @@ static bool dcn32_resource_construct( dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; - dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx; + dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state; - dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context; - dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx; - dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink; - dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release; - dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release; + dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; + dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; + dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; + dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 351c8a2843..2258c5c721 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -39,6 +39,7 @@ #define DCN3_2_MBLK_HEIGHT_8BPE 64 #define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq #define SUBVP_HIGH_REFRESH_LIST_LEN 4 +#define 
SUBVP_ACTIVE_MARGIN_LIST_LEN 2 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 #define DCN3_2_VMIN_DISPCLK_HZ 717000000 #define MIN_SUBVP_DCFCLK_KHZ 400000 @@ -58,6 +59,15 @@ struct subvp_high_refresh_list { } res[SUBVP_HIGH_REFRESH_LIST_LEN]; }; +struct subvp_active_margin_list { + int min_refresh; + int max_refresh; + struct { + int width; + int height; + } res[SUBVP_ACTIVE_MARGIN_LIST_LEN]; +}; + struct dcn32_resource_pool { struct resource_pool base; }; @@ -82,12 +92,6 @@ bool dcn32_release_post_bldn_3dlut( struct dc_3dlut **lut, struct dc_transfer_func **shaper); -bool dcn32_remove_phantom_pipes(struct dc *dc, - struct dc_state *context, bool fast_update); - -void dcn32_retain_phantom_pipes(struct dc *dc, - struct dc_state *context); - void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -160,15 +164,7 @@ void dcn32_determine_det_override(struct dc *dc, void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); -void dcn32_save_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config); - -void dcn32_restore_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config); - -struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context); +struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context); bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 3b7505b5f0..f4dd6443a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -63,7 +63,7 @@ #include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn32/dcn32_dio_link_encoder.h" -#include "dcn321_dio_link_encoder.h" +#include "dcn321/dcn321_dio_link_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" @@ -92,6 +92,8 @@ #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" +#include "dc_state_priv.h" + #define DC_LOGGER_INIT(logger) enum dcn321_clk_src_array_id { @@ -1607,10 +1609,6 @@ static struct resource_funcs dcn321_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, - .remove_phantom_pipes = dcn32_remove_phantom_pipes, - .retain_phantom_pipes = dcn32_retain_phantom_pipes, - .save_mall_state = dcn32_save_mall_state, - .restore_mall_state = dcn32_restore_mall_state, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, }; @@ -2010,16 +2008,19 @@ static bool dcn321_resource_construct( dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; - dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx; + dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_plane = 
&dc_create_plane_state; - dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context; - dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx; - dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink; - dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release; - dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release; + dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; + dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; + dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; + dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h index 82cbf009f2..82cbf009f2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 70ef1e7ff8..78c315541f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -78,7 +78,7 @@ #include "dcn10/dcn10_resource.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn35/dcn35_hwseq.h" -#include "dcn35_dio_link_encoder.h" +#include "dcn35/dcn35_dio_link_encoder.h" #include "dml/dcn31/dcn31_fpu.h" /*todo*/ #include "dml/dcn35/dcn35_fpu.h" #include "dcn35/dcn35_dwb.h" @@ -96,12 +96,15 @@ #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" +#include "dce/dmub_replay.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dml/dcn31/display_mode_vba_31.h" /*temp*/ #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" +#include "dc_state_priv.h" + #include "link_enc_cfg.h" #define DC_LOGGER_INIT(logger) @@ -626,7 +629,19 @@ static struct dce_hwseq_registers hwseq_reg; HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh) + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 
mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN35_MASK_SH_LIST(__SHIFT) @@ -686,7 +701,7 @@ static const struct dc_plane_cap plane_cap = { // 6:1 downscaling ratio: 1000/6 = 166.666 .max_downscale_factor = { - .argb8888 = 167, + .argb8888 = 250, .nv12 = 167, .fp16 = 167 }, @@ -705,7 +720,9 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .disable_dpp_power_gate = true, .disable_hubp_power_gate = true, - .disable_clock_gate = true, + .disable_optc_power_gate = true, /* should be the same as the two above */ + .disable_hpo_power_gate = true, /* dmub fw forces domain25 on */ + .disable_clock_gate = false, .disable_dsc_power_gate = true, .vsr_support = true, .performance_trace = false, @@ -724,7 +741,7 @@ .i2c = true, .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled .dscl = true, - .cm = false, + .cm = true, .mpc = true, .optc = true, .vpg = true, @@ -752,7 +769,7 @@ .enable_hpo_pg_support = false, .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = false, - .disable_idle_power_optimizations = true, + .disable_idle_power_optimizations = false, .dmcub_emulation = false, .disable_boot_optimizations = false, .disable_unbounded_requesting = false, @@ -763,14 +780,17 @@ .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - .ips2_entry_delay_us = 400 + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, + .disable_dmub_reallow_idle = true, + .static_screen_wait_frames = 2, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1529,6 +1549,9 @@ static void dcn35_resource_destruct(struct dcn35_resource_pool *pool) if (pool->base.psr != NULL) dmub_psr_destroy(&pool->base.psr); + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + if (pool->base.pg_cntl != NULL) dcn_pg_cntl_destroy(&pool->base.pg_cntl); @@ -2013,6 +2036,14 @@ static bool dcn35_resource_construct( goto create_fail; } + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + /* ABM */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.multiple_abms[i] = dmub_abm_create(ctx, @@ -2100,6 +2131,7 @@ static bool dcn35_resource_construct( dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; +
dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h index 99aea102e3..a51c4a9eaa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h @@ -166,6 +166,7 @@ struct resource_pool *dcn35_create_resource_pool( SR(MMHUBBUB_MEM_PWR_CNTL), \ SR(DCCG_GATE_DISABLE_CNTL), \ SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCCG_GATE_DISABLE_CNTL4), \ SR(DCCG_GATE_DISABLE_CNTL5), \ SR(DCFCLK_CNTL),\ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index d1a4ed6f59..c78c9224ab 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -86,6 +86,7 @@ enum dmub_status { DMUB_STATUS_TIMEOUT, DMUB_STATUS_INVALID, DMUB_STATUS_HW_FAILURE, + DMUB_STATUS_POWER_STATE_D3 }; /* enum dmub_asic - dmub asic identifier */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 3cea96a364..bb1f69a54c 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -185,8 +185,7 @@ union abm_flags { unsigned int disable_abm_requested : 1; /** - * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled - * immediately. + * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled immediately. */ unsigned int disable_abm_immediately : 1; @@ -654,7 +653,7 @@ union dmub_fw_boot_options { uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/ uint32_t usb4_cm_version: 1; /**< 1 CM support */ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */ - uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */ + uint32_t reserved0: 1; uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/ uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ @@ -818,18 +817,61 @@ enum dmub_gpint_command { * RETURN: Lower 32-bit mask. */ DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK = 101, + /** - * DESC: Updates the trace buffer lower 32-bit mask. + * DESC: Updates the trace buffer mask bit0~bit15. * ARGS: The new mask * RETURN: Lower 32-bit mask. */ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0 = 102, + /** - * DESC: Updates the trace buffer mask bi0~bit15. + * DESC: Updates the trace buffer mask bit16~bit31. * ARGS: The new mask * RETURN: Lower 32-bit mask. */ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1 = 103, + + /** + * DESC: Updates the trace buffer mask bit32~bit47. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2 = 114, + + /** + * DESC: Updates the trace buffer mask bit48~bit63. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3 = 115, + + /** + * DESC: Read the trace buffer mask bit0~bit15. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0 = 116, + + /** + * DESC: Read the trace buffer mask bit16~bit31. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD1 = 117, + + /** + * DESC: Read the trace buffer mask bit32~bit47.
+ */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD2 = 118, + + /** + * DESC: Read the trace buffer mask bit48~bit63. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119, + + /** + * DESC: Enable measurement of various task durations + * ARGS: 0 - Disable measurement + * 1 - Enable measurement + */ + DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123, }; /** @@ -1303,6 +1345,10 @@ enum dmub_cmd_cab_type { * Fit surfaces in CAB (i.e. CAB enable) */ DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB = 2, + /** + * Do not fit surfaces in CAB (i.e. no CAB) + */ + DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB = 3, }; /** @@ -2786,6 +2832,7 @@ struct dmub_rb_cmd_psr_set_power_opt { #define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) # define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) # define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_MODE_IPS 0x10 #define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT) @@ -2840,6 +2887,18 @@ enum dmub_cmd_replay_type { * Set power opt and coasting vtotal. */ DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL = 4, + /** + * Set disabled timing sync. + */ + DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED = 5, + /** + * Set Residency Frameupdate Timer. + */ + DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER = 6, + /** + * Set pseudo vtotal. + */ + DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7, }; /** @@ -3003,6 +3062,46 @@ struct dmub_cmd_replay_set_power_opt_data { }; /** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command. + */ +struct dmub_cmd_replay_set_timing_sync_data { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * REPLAY set_timing_sync + */ + uint8_t timing_sync_supported; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ +struct dmub_cmd_replay_set_pseudo_vtotal { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Source vtotal used by Replay + IPS + ABM for full screen video + */ + uint16_t vtotal; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad; +}; + +/** * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. */ struct dmub_rb_cmd_replay_set_power_opt { @@ -3034,6 +3133,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data { * Currently the support is only for 0 or 1 */ uint8_t panel_inst; + /** + * 16-bit value dictated by driver that indicates the coasting vtotal high byte part. + */ + uint16_t coasting_vtotal_high; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; }; /** @@ -3069,6 +3176,91 @@ struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal { }; /** + * Definition of a DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command. + */ +struct dmub_rb_cmd_replay_set_timing_sync { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command. + */ + struct dmub_cmd_replay_set_timing_sync_data replay_set_timing_sync_data; +}; + +/** + * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ +struct dmub_rb_cmd_replay_set_pseudo_vtotal { + /** + * Command header.
+ */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ + struct dmub_cmd_replay_set_pseudo_vtotal data; +}; + +/** + * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. + */ +struct dmub_cmd_replay_frameupdate_timer_data { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Enable or disable the Replay Frameupdate Timer + */ + uint8_t enable; + /** + * REPLAY force reflash frame update number + */ + uint16_t frameupdate_count; +}; +/** + * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER + */ +struct dmub_rb_cmd_replay_set_frameupdate_timer { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data. + */ + struct dmub_cmd_replay_frameupdate_timer_data data; +}; + +/** + * Definition of the replay command set union + */ +union dmub_replay_cmd_set { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Definition of DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command data. + */ + struct dmub_cmd_replay_set_timing_sync_data sync_data; + /** + * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data. + */ + struct dmub_cmd_replay_frameupdate_timer_data timer_data; + /** + * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data. + */ + struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data; +}; + +/** * Set of HW components that can be locked. * * Note: If updating with more HW components, fields @@ -4211,6 +4403,16 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL command. */ struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal replay_set_power_opt_and_coasting_vtotal; + + struct dmub_rb_cmd_replay_set_timing_sync replay_set_timing_sync; + /** + * Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. + */ + struct dmub_rb_cmd_replay_set_frameupdate_timer replay_set_frameupdate_timer; + /** + * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ + struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 59d4e64845..9ad7388053 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -64,7 +64,7 @@ /* Default scratch mem size. */ -#define DMUB_SCRATCH_MEM_SIZE (256) +#define DMUB_SCRATCH_MEM_SIZE (1024) /* Number of windows in use.
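The replay structures above are plain ring-buffer payloads routed through the new union dmub_rb_cmd members. A minimal driver-side sketch of filling and submitting the new pseudo-vtotal command follows; the DMUB_CMD__REPLAY command type and the dm_execute_dmub_cmd() submission helper are assumed from the surrounding DC code, so treat this as illustrative rather than patch code:

static void replay_send_pseudo_vtotal(struct dc *dc, uint8_t panel_inst, uint16_t vtotal)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.replay_set_pseudo_vtotal.header.type = DMUB_CMD__REPLAY;
	cmd.replay_set_pseudo_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
	cmd.replay_set_pseudo_vtotal.header.payload_bytes = sizeof(cmd.replay_set_pseudo_vtotal.data);
	cmd.replay_set_pseudo_vtotal.data.panel_inst = panel_inst;	/* only 0 or 1 supported */
	cmd.replay_set_pseudo_vtotal.data.vtotal = vtotal;

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);	/* assumed helper */
}

The companion coasting-vtotal change follows the same pattern but splits a 32-bit value across the existing 16-bit coasting_vtotal field and the new coasting_vtotal_high field, presumably recombined by firmware as (coasting_vtotal_high << 16) | coasting_vtotal.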
*/ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) @@ -768,7 +768,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, return DMUB_STATUS_INVALID; if (dmub->power_state != DMUB_POWER_STATE_D0) - return DMUB_STATUS_INVALID; + return DMUB_STATUS_POWER_STATE_D3; if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity || dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) { @@ -789,7 +789,7 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) return DMUB_STATUS_INVALID; if (dmub->power_state != DMUB_POWER_STATE_D0) - return DMUB_STATUS_INVALID; + return DMUB_STATUS_POWER_STATE_D3; /** * Read back all the queued commands to ensure that they've diff --git a/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h index 42229b4eff..eced9ad91f 100644 --- a/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h +++ b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h @@ -69,6 +69,11 @@ enum hdcp_message_id { HDCP_MESSAGE_ID_READ_RXSTATUS, HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + /* PS175 chip */ + + HDCP_MESSAGE_ID_WRITE_PS175_CMD, + HDCP_MESSAGE_ID_READ_PS175_RSP, + HDCP_MESSAGE_ID_MAX }; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ccecddafeb..3955b7e4b2 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -81,6 +81,7 @@ fail_alloc_context: void mod_freesync_destroy(struct mod_freesync *mod_freesync) { struct core_freesync *core_freesync = NULL; + if (mod_freesync == NULL) return; core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); @@ -278,9 +279,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync, } } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { /* Enter Below the Range */ - if (!in_out_vrr->btr.btr_active) { + if (!in_out_vrr->btr.btr_active) in_out_vrr->btr.btr_active = true; - } } /* BTR set to "not active" so disengage */ @@ -693,10 +693,12 @@ static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf, if (app_tf != TRANSFER_FUNC_UNKNOWN) { infopacket->valid = true; - if (app_tf != TRANSFER_FUNC_PQ2084) { + if (app_tf == TRANSFER_FUNC_PQ2084) + infopacket->sb[9] |= 0x20; // PB9 = [Bit 5 = PQ EOTF Active] + else { infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active] if (app_tf == TRANSFER_FUNC_GAMMA_22) - infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active] + infopacket->sb[9] |= 0x04; // PB9 = [Bit 2 = Gamma 2.2 EOTF Active] } } } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 1ddb4f5eac..182e7532dd 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -63,6 +63,7 @@ static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; + if (is_dp_hdcp(hdcp)) { status = (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_R0_PRIME_READY) ? 
@@ -131,9 +132,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* Avoid device count == 0 to do authentication */ - if (0 == get_device_count(hdcp)) { + if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; - } /* Some MST display may choose to report the internal panel as an HDCP RX. * To update this condition with 1(because the immediate repeater's internal diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 91c22b96eb..733f22bed0 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -208,9 +208,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* Avoid device count == 0 to do authentication */ - if (0 == get_device_count(hdcp)) { + if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; - } /* Some MST display may choose to report the internal panel as an HDCP RX. */ /* To update this condition with 1(because the immediate repeater's internal */ @@ -689,9 +688,8 @@ static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp, if (is_hdmi_dvi_sl_hdcp(hdcp)) { if (!process_rxstatus(hdcp, event_ctx, input, &status)) goto out; - if (event_ctx->rx_id_list_ready) { + if (event_ctx->rx_id_list_ready) goto out; - } } if (is_hdmi_dvi_sl_hdcp(hdcp)) if (!mod_hdcp_execute_and_set(check_stream_ready_available, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index c62df3bcc7..1d83c1b9da 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -86,10 +86,12 @@ #define HDCP_CPIRQ_TRACE(hdcp) \ HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index) #define HDCP_EVENT_TRACE(hdcp, event) \ - if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ - HDCP_TIMEOUT_TRACE(hdcp); \ - else if (event == MOD_HDCP_EVENT_CPIRQ) \ - HDCP_CPIRQ_TRACE(hdcp) + do { \ + if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ + HDCP_TIMEOUT_TRACE(hdcp); \ + else if (event == MOD_HDCP_EVENT_CPIRQ) \ + HDCP_CPIRQ_TRACE(hdcp); \ + } while (0) /* TODO: find some way to tell if logging is off to save time */ #define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index ff930a71e4..7c9805705f 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -443,7 +443,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE) - continue; + continue; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -929,7 +929,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE) - continue; + continue; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; 
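The hdcp_log.h hunk above wraps HDCP_EVENT_TRACE in do { ... } while (0) because a bare if/else-if macro body misbehaves when used as a single statement under another if. A sketch of the hazard at a hypothetical call site (link_ok and handle_link_loss() are illustrative, not from the patch):

	if (link_ok)
		HDCP_EVENT_TRACE(hdcp, event);
	else
		handle_link_loss();

	/*
	 * Old expansion (no wrapper):
	 *
	 *	if (link_ok)
	 *		if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
	 *			HDCP_TIMEOUT_TRACE(hdcp);
	 *		else if (event == MOD_HDCP_EVENT_CPIRQ)
	 *			HDCP_CPIRQ_TRACE(hdcp);
	 *	else
	 *		handle_link_loss();
	 *
	 * By the dangling-else rule the final else binds to the inner
	 * "else if", so handle_link_loss() runs when link_ok is true and
	 * neither event matched, instead of when link_ok is false. The
	 * do { ... } while (0) wrapper makes the macro expand to one
	 * statement and restores the intended pairing.
	 */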
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h index 5b71bc96b9..7844ea9165 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -98,9 +98,9 @@ enum ta_dtm_encoder_type { * This enum defines software value for dio_output_type */ typedef enum { - TA_DTM_DIO_OUTPUT_TYPE__INVALID, - TA_DTM_DIO_OUTPUT_TYPE__DIRECT, - TA_DTM_DIO_OUTPUT_TYPE__DPIA + TA_DTM_DIO_OUTPUT_TYPE__INVALID, + TA_DTM_DIO_OUTPUT_TYPE__DIRECT, + TA_DTM_DIO_OUTPUT_TYPE__DPIA } ta_dtm_dio_output_type; struct ta_dtm_topology_update_input_v3 { @@ -237,11 +237,11 @@ enum ta_hdcp2_hdcp2_msg_id_max_size { #define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127 #define TA_HDCP__HDCP1_V_PRIME_SIZE 20 #define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \ - TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6 + (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6) // 64 bits boundaries #define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \ - TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4 + (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4) enum ta_hdcp_status { TA_HDCP_STATUS__SUCCESS = 0x00, diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index afe1f6cce5..cc3dc9b589 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -23,34 +23,6 @@ * */ - - - -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - #ifndef MOD_FREESYNC_H_ #define MOD_FREESYNC_H_ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h index 5960dd760e..8ce6c22e5d 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h @@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats, unsigned int length); void mod_stats_update_flip(struct mod_stats *mod_stats, - unsigned long timestamp_in_ns); + unsigned long long timestamp_in_ns); void mod_stats_update_vupdate(struct mod_stats *mod_stats, - unsigned long timestamp_in_ns); + unsigned long long timestamp_in_ns); void mod_stats_update_freesync(struct mod_stats *mod_stats, unsigned int v_total_min, diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 1675314a3f..2a3698fd2d 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -31,7 +31,7 @@ #define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b)) #define bswap16_based_on_endian(big_endian, value) \ - (big_endian) ? cpu_to_be16(value) : cpu_to_le16(value) + ((big_endian) ? cpu_to_be16(value) : cpu_to_le16(value)) /* Possible Min Reduction config from least aggressive to most aggressive * 0 1 2 3 4 5 6 7 8 9 10 11 12 @@ -973,6 +973,39 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, return true; } +void set_replay_coasting_vtotal(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t vtotal) +{ + link->replay_settings.coasting_vtotal_table[type] = vtotal; +} + +void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal) +{ + link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal; +} + +void calculate_replay_link_off_frame_count(struct dc_link *link, + uint16_t vtotal, uint16_t htotal) +{ + uint8_t max_link_off_frame_count = 0; + uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0; + + max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; + pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line; + + if (htotal != 0 && vtotal != 0) + max_link_off_frame_count = htotal * max_deviation_line / (pixel_deviation_per_line * vtotal); + else + ASSERT(0); + + link->replay_settings.link_off_frame_count_level = + max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_BEST ? PR_LINK_OFF_FRAME_COUNT_BEST : + max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_GOOD ? 
PR_LINK_OFF_FRAME_COUNT_GOOD : + PR_LINK_OFF_FRAME_COUNT_FAIL; + +} + bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps) { unsigned int data_points_size; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index d9e0d67d67..ff7e6f3cd6 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -54,6 +54,12 @@ bool dmub_init_abm_config(struct resource_pool *res_pool, unsigned int inst); void init_replay_config(struct dc_link *link, struct replay_config *pr_config); +void set_replay_coasting_vtotal(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t vtotal); +void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal); +void calculate_replay_link_off_frame_count(struct dc_link *link, + uint16_t vtotal, uint16_t htotal); bool is_psr_su_specific_panel(struct dc_link *link); void mod_power_calc_psr_configs(struct psr_config *psr_config, diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 579977f6ad..df2c7ffe19 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -255,6 +255,10 @@ enum DC_DEBUG_MASK { DC_FORCE_SUBVP_MCLK_SWITCH = 0x20, DC_DISABLE_MPO = 0x40, DC_ENABLE_DPIA_TRACE = 0x80, + DC_ENABLE_DML2 = 0x100, + DC_DISABLE_PSR_SU = 0x200, + DC_DISABLE_REPLAY = 0x400, + DC_DISABLE_IPS = 0x800, }; enum amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/amdgpu_reg_state.h b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h new file mode 100644 index 0000000000..335980e2af --- /dev/null +++ b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_REG_STATE_H__ +#define __AMDGPU_REG_STATE_H__ + +enum amdgpu_reg_state { + AMDGPU_REG_STATE_TYPE_INVALID = 0, + AMDGPU_REG_STATE_TYPE_XGMI = 1, + AMDGPU_REG_STATE_TYPE_WAFL = 2, + AMDGPU_REG_STATE_TYPE_PCIE = 3, + AMDGPU_REG_STATE_TYPE_USR = 4, + AMDGPU_REG_STATE_TYPE_USR_1 = 5 +}; + +enum amdgpu_sysfs_reg_offset { + AMDGPU_SYS_REG_STATE_XGMI = 0x0000, + AMDGPU_SYS_REG_STATE_WAFL = 0x1000, + AMDGPU_SYS_REG_STATE_PCIE = 0x2000, + AMDGPU_SYS_REG_STATE_USR = 0x3000, + AMDGPU_SYS_REG_STATE_USR_1 = 0x4000, + AMDGPU_SYS_REG_STATE_END = 0x5000, +}; + +struct amdgpu_reg_state_header { + uint16_t structure_size; + uint8_t format_revision; + uint8_t content_revision; + uint8_t state_type; + uint8_t num_instances; + uint16_t pad; +}; + +enum amdgpu_reg_inst_state { + AMDGPU_INST_S_OK, + AMDGPU_INST_S_EDISABLED, + AMDGPU_INST_S_EACCESS, +}; + +struct amdgpu_smn_reg_data { + uint64_t addr; + uint32_t value; + uint32_t pad; +}; + +struct amdgpu_reg_inst_header { + uint16_t instance; + uint16_t state; + uint16_t num_smn_regs; + uint16_t pad; +}; + + +struct amdgpu_regs_xgmi_v1_0 { + struct amdgpu_reg_inst_header inst_header; + + struct amdgpu_smn_reg_data smn_reg_values[]; +}; + +struct amdgpu_reg_state_xgmi_v1_0 { + /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_XGMI */ + struct amdgpu_reg_state_header common_header; + + struct amdgpu_regs_xgmi_v1_0 xgmi_state_regs[]; +}; + +struct amdgpu_regs_wafl_v1_0 { + struct amdgpu_reg_inst_header inst_header; + + struct amdgpu_smn_reg_data smn_reg_values[]; +}; + +struct amdgpu_reg_state_wafl_v1_0 { + /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_WAFL */ + struct amdgpu_reg_state_header common_header; + + struct amdgpu_regs_wafl_v1_0 wafl_state_regs[]; +}; + +struct amdgpu_regs_pcie_v1_0 { + struct amdgpu_reg_inst_header inst_header; + + uint16_t device_status; + uint16_t link_status; + uint32_t sub_bus_number_latency; + uint32_t pcie_corr_err_status; + uint32_t pcie_uncorr_err_status; + + struct amdgpu_smn_reg_data smn_reg_values[]; +}; + +struct amdgpu_reg_state_pcie_v1_0 { + /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_PCIE */ + struct amdgpu_reg_state_header common_header; + + struct amdgpu_regs_pcie_v1_0 pci_state_regs[]; +}; + +struct amdgpu_regs_usr_v1_0 { + struct amdgpu_reg_inst_header inst_header; + + struct amdgpu_smn_reg_data smn_reg_values[]; +}; + +struct amdgpu_reg_state_usr_v1_0 { + /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_USR */ + struct amdgpu_reg_state_header common_header; + + struct amdgpu_regs_usr_v1_0 usr_state_regs[]; +}; + +static inline size_t amdgpu_reginst_size(uint16_t num_inst, size_t inst_size, + uint16_t num_regs) +{ + return num_inst * + (inst_size + num_regs * sizeof(struct amdgpu_smn_reg_data)); +} + +#define amdgpu_asic_get_reg_state_supported(adev) \ + (((adev)->asic_funcs && (adev)->asic_funcs->get_reg_state) ? 1 : 0) + +#define amdgpu_asic_get_reg_state(adev, state, buf, size) \ + ((adev)->asic_funcs->get_reg_state ? 
\ + (adev)->asic_funcs->get_reg_state((adev), (state), (buf), \ + (size)) : \ + 0) + + +int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev); +void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h index b646648792..fca72e2ec9 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h @@ -6220,12 +6220,20 @@ #define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x3 #define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x4 #define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE__SHIFT 0x11 +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE__SHIFT 0x17 +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE__SHIFT 0x18 +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE__SHIFT 0x19 +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE__SHIFT 0x1a #define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000001L #define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000002L #define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000004L #define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000008L #define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000010L #define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE_MASK 0x00020000L +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE_MASK 0x00800000L +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE_MASK 0x01000000L +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE_MASK 0x02000000L +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE_MASK 0x04000000L #define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL__SHIFT 0x0 #define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN__SHIFT 0x3 #define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL__SHIFT 0x4 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h index 7ee3d29112..6f80bfa7e4 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h @@ -8707,10 +8707,10 @@ #define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX 2 #define regBIF_BX1_BX_RESET_CNTL 0x00f0 #define regBIF_BX1_BX_RESET_CNTL_BASE_IDX 2 -#define regBIF_BX1_INTERRUPT_CNTL 0x8e11 -#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 5 -#define regBIF_BX1_INTERRUPT_CNTL2 0x8e12 -#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 5 +#define regBIF_BX1_INTERRUPT_CNTL 0x00f1 +#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 2 +#define regBIF_BX1_INTERRUPT_CNTL2 0x00f2 +#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 2 #define regBIF_BX1_CLKREQB_PAD_CNTL 0x00f8 #define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX 2 #define regBIF_BX1_BIF_FEATURES_CONTROL_MISC 0x00fb diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h new file mode 100644 index 0000000000..a4dd372c05 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _smuio_10_0_2_OFFSET_HEADER + +// addressBlock: smuio_smuio_misc_SmuSmuioDec +// base address: 0x5a000 +#define mmSMUIO_MCM_CONFIG 0x0023 +#define mmSMUIO_MCM_CONFIG_BASE_IDX 0 +#define mmIP_DISCOVERY_VERSION 0x0000 +#define mmIP_DISCOVERY_VERSION_BASE_IDX 1 +#define mmIO_SMUIO_PINSTRAP 0x01b1 +#define mmIO_SMUIO_PINSTRAP_BASE_IDX 1 +#define mmSCRATCH_REGISTER0 0x01b2 +#define mmSCRATCH_REGISTER0_BASE_IDX 1 +#define mmSCRATCH_REGISTER1 0x01b3 +#define mmSCRATCH_REGISTER1_BASE_IDX 1 +#define mmSCRATCH_REGISTER2 0x01b4 +#define mmSCRATCH_REGISTER2_BASE_IDX 1 +#define mmSCRATCH_REGISTER3 0x01b5 +#define mmSCRATCH_REGISTER3_BASE_IDX 1 +#define mmSCRATCH_REGISTER4 0x01b6 +#define mmSCRATCH_REGISTER4_BASE_IDX 1 +#define mmSCRATCH_REGISTER5 0x01b7 +#define mmSCRATCH_REGISTER5_BASE_IDX 1 +#define mmSCRATCH_REGISTER6 0x01b8 +#define mmSCRATCH_REGISTER6_BASE_IDX 1 +#define mmSCRATCH_REGISTER7 0x01b9 +#define mmSCRATCH_REGISTER7_BASE_IDX 1 + + +// addressBlock: smuio_smuio_reset_SmuSmuioDec +// base address: 0x5a300 +#define mmSMUIO_MP_RESET_INTR 0x00c1 +#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0 +#define mmSMUIO_SOC_HALT 0x00c2 +#define mmSMUIO_SOC_HALT_BASE_IDX 0 +#define mmSMUIO_GFX_MISC_CNTL 0x00c8 +#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 + + +// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec +// base address: 0x5a000 +#define mmPWROK_REFCLK_GAP_CYCLES 0x0001 +#define mmPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1 +#define mmGOLDEN_TSC_INCREMENT_UPPER 0x0004 +#define mmGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1 +#define mmGOLDEN_TSC_INCREMENT_LOWER 0x0005 +#define mmGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_UPPER 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1 +#define mmGFX_GOLDEN_TSC_SHADOW_UPPER 0x0029 +#define mmGFX_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1 +#define mmGFX_GOLDEN_TSC_SHADOW_LOWER 0x002a +#define mmGFX_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1 +#define mmSOC_GOLDEN_TSC_SHADOW_UPPER 0x002b +#define mmSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1 +#define mmSOC_GOLDEN_TSC_SHADOW_LOWER 0x002c +#define mmSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1 +#define mmSOC_GAP_PWROK 0x002d +#define mmSOC_GAP_PWROK_BASE_IDX 1 + +// addressBlock: smuio_smuio_swtimer_SmuSmuioDec +// base address: 0x5ac40 +#define mmPWR_VIRT_RESET_REQ 0x0110 +#define mmPWR_VIRT_RESET_REQ_BASE_IDX 1 +#define mmPWR_DISP_TIMER_CONTROL 0x0111 +#define 
mmPWR_DISP_TIMER_CONTROL_BASE_IDX 1 +#define mmPWR_DISP_TIMER2_CONTROL 0x0113 +#define mmPWR_DISP_TIMER2_CONTROL_BASE_IDX 1 +#define mmPWR_DISP_TIMER_GLOBAL_CONTROL 0x0115 +#define mmPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1 +#define mmPWR_IH_CONTROL 0x0116 +#define mmPWR_IH_CONTROL_BASE_IDX 1 + +// addressBlock: smuio_smuio_svi0_SmuSmuioDec +// base address: 0x6f000 +#define mmSMUSVI0_TEL_PLANE0 0x520e +#define mmSMUSVI0_TEL_PLANE0_BASE_IDX 1 +#define mmSMUSVI0_PLANE0_CURRENTVID 0x5217 +#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h new file mode 100644 index 0000000000..d10ae61c34 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _smuio_10_0_2_SH_MASK_HEADER + +// addressBlock: smuio_smuio_misc_SmuSmuioDec +//SMUIO_MCM_CONFIG +#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0 +#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2 +#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x5 +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0x6 +#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10 +#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11 +#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L +#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL +#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000020L +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x000000C0L +#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L +#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L +//IP_DISCOVERY_VERSION +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0 +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL +//IO_SMUIO_PINSTRAP +#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN__SHIFT 0x0 +#define IO_SMUIO_PINSTRAP__AUD__SHIFT 0x3 +#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN_MASK 0x00000007L +#define IO_SMUIO_PINSTRAP__AUD_MASK 0x00000018L +//SCRATCH_REGISTER0 +#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0 +#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER1 +#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0 +#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER2 +#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0 +#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER3 +#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0 +#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER4 +#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0 +#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER5 +#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0 +#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER6 +#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0 +#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER7 +#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0 +#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL + +// addressBlock: smuio_smuio_reset_SmuSmuioDec +//SMUIO_MP_RESET_INTR +#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0 +#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L +//SMUIO_SOC_HALT +#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2 +#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3 +#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L +#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L +//SMUIO_GFX_MISC_CNTL +#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 +#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 +#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH__SHIFT 0x3 +#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN__SHIFT 0x4 +#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L +#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L +#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH_MASK 0x00000008L +#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN_MASK 0x00000010L + +// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec +//PWROK_REFCLK_GAP_CYCLES +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L 
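These generated headers pair every field's _MASK define with a matching __SHIFT, and decoding is always mask-then-shift. A short sketch using the SMUIO_MCM_CONFIG fields defined above (the bare RREG32() read is illustrative; real driver code resolves the register's base through the _BASE_IDX defines in the companion offset header, e.g. via RREG32_SOC15()-style helpers):

	uint32_t cfg = RREG32(mmSMUIO_MCM_CONFIG);	/* illustrative read */
	uint32_t die_id = (cfg & SMUIO_MCM_CONFIG__DIE_ID_MASK) >>
			  SMUIO_MCM_CONFIG__DIE_ID__SHIFT;	/* bits 1:0 */
	uint32_t pkg_type = (cfg & SMUIO_MCM_CONFIG__PKG_TYPE_MASK) >>
			    SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT;	/* bits 4:2 */

This mask-then-shift pattern is what the driver's REG_GET_FIELD()-style macros expand to.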
+//GOLDEN_TSC_INCREMENT_UPPER +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_INCREMENT_LOWER +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL +//GOLDEN_TSC_COUNT_UPPER +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_COUNT_LOWER +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL +//GFX_GOLDEN_TSC_SHADOW_UPPER +#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper__SHIFT 0x0 +#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper_MASK 0x00FFFFFFL +//GFX_GOLDEN_TSC_SHADOW_LOWER +#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower__SHIFT 0x0 +#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower_MASK 0xFFFFFFFFL +//SOC_GOLDEN_TSC_SHADOW_UPPER +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL +//SOC_GOLDEN_TSC_SHADOW_LOWER +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL +//SOC_GAP_PWROK +#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0 +#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L + +// addressBlock: smuio_smuio_swtimer_SmuSmuioDec +//PWR_VIRT_RESET_REQ +#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0 +#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f +#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL +#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L +//PWR_DISP_TIMER_CONTROL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER2_CONTROL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define 
PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER_GLOBAL_CONTROL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0 +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L +//PWR_IH_CONTROL +#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0 +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5 +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6 +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f +#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L + +// addressBlock: smuio_smuio_svi0_SmuSmuioDec +//SMUSVI0_TEL_PLANE0 +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR__SHIFT 0x0 +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10 +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR_MASK 0x000000FFL +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L +//SMUSVI0_PLANE0_CURRENTVID +#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18 +#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index c2ccf3724e..edcb85560c 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -422,7 +422,7 @@ struct amd_pm_funcs { int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock); int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock); int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock); - int (*get_asic_baco_capability)(void *handle, bool *cap); + bool (*get_asic_baco_capability)(void *handle); int (*get_asic_baco_state)(void *handle, int *state); int (*set_asic_baco_state)(void *handle, int state); int (*get_ppfeature_status)(void *handle, char *buf); @@ -432,6 +432,7 @@ struct amd_pm_funcs { int (*set_df_cstate)(void *handle, enum pp_df_cstate state); int (*set_xgmi_pstate)(void *handle, uint32_t pstate); ssize_t (*get_gpu_metrics)(void *handle, void **table); + ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size); int (*set_watermarks_for_clock_ranges)(void *handle, struct pp_smu_wm_range_sets *ranges); int (*display_disable_memory_clock_switch)(void *handle, @@ -1225,4 +1226,19 @@ struct gpu_metrics_v3_0 { /* Metrics table alpha filter time constant [us] */ uint32_t time_filter_alphavalue; }; + +struct amdgpu_pmmetrics_header { + uint16_t structure_size; + uint16_t pad; + uint32_t mp1_ip_discovery_version; + uint32_t pmfw_version; + uint32_t pmmetrics_version; +}; + +struct amdgpu_pm_metrics { + struct amdgpu_pmmetrics_header common_header; + + uint8_t data[]; +}; + #endif diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index e07e93167a..ec5b9ab67c 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -232,6 +232,7 @@ union MESAPI_SET_HW_RESOURCES { }; uint32_t 
oversubscription_timer; uint64_t doorbell_info; + uint64_t event_intr_history_gpu_mc_ptr; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 8ec11da031..6627ee07d5 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -203,8 +203,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; void *pp_handle = adev->powerplay.pp_handle; - bool baco_cap; - int ret = 0; + bool ret; if (!pp_funcs || !pp_funcs->get_asic_baco_capability) return false; @@ -222,12 +221,11 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) mutex_lock(&adev->pm.mutex); - ret = pp_funcs->get_asic_baco_capability(pp_handle, - &baco_cap); + ret = pp_funcs->get_asic_baco_capability(pp_handle); mutex_unlock(&adev->pm.mutex); - return ret ? false : baco_cap; + return ret; } int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) @@ -618,6 +616,16 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) enable ? "enable" : "disable", ret); } +void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) +{ + int ret = 0; + + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable); + if (ret) + DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", + enable ? "enable" : "disable", ret); +} + int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1319,6 +1327,23 @@ int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) return ret; } +ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, + size_t size) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; + + if (!pp_funcs->get_pm_metrics) + return -EOPNOTSUPP; + + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics, + size); + mutex_unlock(&adev->pm.mutex); + + return ret; +} + int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, uint32_t *fan_mode) { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index fee86be55c..b4698f9856 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -1799,6 +1799,44 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev, return count; } +static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev, + struct amdgpu_device_attr *attr, + uint32_t mask, + enum amdgpu_device_attr_states *states) +{ + if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP) + *states = ATTR_STATE_UNSUPPORTED; + + return 0; +} + +static ssize_t amdgpu_get_pm_metrics(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + ssize_t size = 0; + int ret; + + if (amdgpu_in_reset(adev)) + return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(ddev->dev); + return ret; + } + + size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; +} + /** * DOC: gpu_metrics * @@ -2096,6 +2134,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(smartshift_bias, 
ATTR_FLAG_BASIC, .attr_update = ss_bias_attr_update), AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC, + .attr_update = amdgpu_pm_metrics_attr_update), }; static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, @@ -4177,6 +4217,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev) } } + /* + * If gpu_od is the only member in the list, that means gpu_od is an + * empty directory, so remove it. + */ + if (list_is_singular(&adev->pm.od_kobj_list)) + goto err_out; + return 0; err_out: @@ -4319,11 +4366,19 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) seq_printf(m, "\t%u mV (VDDNB)\n", value); size = sizeof(uint32_t); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) { + if (adev->flags & AMD_IS_APU) + seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff); + else + seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff); + } size = sizeof(uint32_t); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) { + if (adev->flags & AMD_IS_APU) + seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff); + else + seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff); + } size = sizeof(value); seq_printf(m, "\n"); @@ -4349,9 +4404,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* VCN clocks */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { if (!value) { - seq_printf(m, "VCN: Disabled\n"); + seq_printf(m, "VCN: Powered down\n"); } else { - seq_printf(m, "VCN: Enabled\n"); + seq_printf(m, "VCN: Powered up\n"); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) seq_printf(m, "\t%u MHz (DCLK)\n", value/100); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) @@ -4363,9 +4418,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* UVD clocks */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { if (!value) { - seq_printf(m, "UVD: Disabled\n"); + seq_printf(m, "UVD: Powered down\n"); } else { - seq_printf(m, "UVD: Enabled\n"); + seq_printf(m, "UVD: Powered up\n"); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) seq_printf(m, "\t%u MHz (DCLK)\n", value/100); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) @@ -4377,9 +4432,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* VCE clocks */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { if (!value) { - seq_printf(m, "VCE: Disabled\n"); + seq_printf(m, "VCE: Powered down\n"); } else { - seq_printf(m, "VCE: Enabled\n"); + seq_printf(m, "VCE: Powered up\n"); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) seq_printf(m, "\t%u MHz (ECCLK)\n", 
value/100); } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 482ea30147..3047ffe7f2 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -445,6 +445,7 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev); void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); +void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable); int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version); int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable); int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size); @@ -513,6 +514,18 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, long *input, uint32_t size); int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table); + +/** + * @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The + * sample is copied to the pm_metrics buffer, which is expected to be allocated + * by the caller; the size of the allocated buffer is passed in. The maximum + * size expected for a metrics sample is 4096 bytes. + * + * Return: Actual size of the metrics sample + */ +ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, + size_t size); + int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, uint32_t *fan_mode); int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 914c153871..aed0e2cefb 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1371,21 +1371,18 @@ static int pp_set_active_display_count(void *handle, uint32_t count) return phm_set_active_display_count(hwmgr, count); } -static int pp_get_asic_baco_capability(void *handle, bool *cap) +static bool pp_get_asic_baco_capability(void *handle) { struct pp_hwmgr *hwmgr = handle; - *cap = false; if (!hwmgr) - return -EINVAL; + return false; if (!(hwmgr->not_vf && amdgpu_dpm) || !hwmgr->hwmgr_func->get_asic_baco_capability) - return 0; + return false; - hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap); - - return 0; + return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr); } static int pp_get_asic_baco_state(void *handle, int *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c index 044cda005a..e8a9471c18 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c @@ -33,21 +33,20 @@ #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" -int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; - *cap = false; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) return 0; reg = RREG32(mmCC_BIF_BX_FUSESTRAP0); if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK) - *cap = true; + return true; - return 0; + return false; } int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git
a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h index be0d98abb5..73a773f4ce 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr); extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c index de0a37f7c6..c66ef97415 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c @@ -28,14 +28,13 @@ #include "vega10_inc.h" #include "smu9_baco.h" -int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg, data; - *cap = false; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return 0; + return false; WREG32(0x12074, 0xFFF0003B); data = RREG32(0x12075); @@ -44,10 +43,10 @@ int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - *cap = true; + return true; } - return 0; + return false; } int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h index 84e90f801a..9ff7c2ea1b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr); extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c index 994c0d374b..dad4c80aee 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c @@ -36,23 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, }; -int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; - *cap = false; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return 0; + return false; if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) { reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - *cap = true; + return true; } - return 0; + return false; } int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h index f06471e712..bdad9c9156 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h @@ -25,7 +25,7 @@ 
#include "hwmgr.h" #include "common_baco.h" -extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr); extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 81650727a5..6f536159df 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -351,7 +351,7 @@ struct pp_hwmgr_func { int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap); + bool (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr); int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c index 9e4228232f..ad1fd3150d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c @@ -2298,6 +2298,7 @@ static uint32_t ci_get_mac_definition(uint32_t value) case SMU_MAX_ENTRIES_SMIO: return SMU7_MAX_ENTRIES_SMIO; case SMU_MAX_LEVELS_VDDC: + case SMU_MAX_LEVELS_VDDGFX: return SMU7_MAX_LEVELS_VDDC; case SMU_MAX_LEVELS_VDDCI: return SMU7_MAX_LEVELS_VDDCI; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 97d9802fe6..17d2f5bff4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2263,6 +2263,7 @@ static uint32_t iceland_get_mac_definition(uint32_t value) case SMU_MAX_ENTRIES_SMIO: return SMU71_MAX_ENTRIES_SMIO; case SMU_MAX_LEVELS_VDDC: + case SMU_MAX_LEVELS_VDDGFX: return SMU71_MAX_LEVELS_VDDC; case SMU_MAX_LEVELS_VDDCI: return SMU71_MAX_LEVELS_VDDCI; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 60dce148b2..0ad947df77 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1315,6 +1315,187 @@ static int smu_get_thermal_temperature_range(struct smu_context *smu) return ret; } +/** + * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges + * + * @smu: smu_context pointer + * + * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling. + * Returns 0 on success, error on failure. 
+ */ +static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu) +{ + struct wbrf_ranges_in_out wbrf_exclusion = {0}; + struct freq_band_range *wifi_bands = wbrf_exclusion.band_list; + struct amdgpu_device *adev = smu->adev; + uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES; + uint64_t start, end; + int ret, i, j; + + ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion); + if (ret) { + dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n"); + return ret; + } + + /* + * The exclusion ranges array we got might be filled with holes and duplicate + * entries. For example: + * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...} + * We need to do some sorting to eliminate those holes and duplicate entries. + * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...} + */ + for (i = 0; i < num_of_wbrf_ranges; i++) { + start = wifi_bands[i].start; + end = wifi_bands[i].end; + + /* get the last valid entry to fill the intermediate hole */ + if (!start && !end) { + for (j = num_of_wbrf_ranges - 1; j > i; j--) + if (wifi_bands[j].start && wifi_bands[j].end) + break; + + /* no valid entry left */ + if (j <= i) + break; + + start = wifi_bands[i].start = wifi_bands[j].start; + end = wifi_bands[i].end = wifi_bands[j].end; + wifi_bands[j].start = 0; + wifi_bands[j].end = 0; + num_of_wbrf_ranges = j; + } + + /* eliminate duplicate entries */ + for (j = i + 1; j < num_of_wbrf_ranges; j++) { + if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) { + wifi_bands[j].start = 0; + wifi_bands[j].end = 0; + } + } + } + + /* Send the sorted wifi_bands to PMFW */ + ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands); + /* Try to set the wifi_bands again */ + if (unlikely(ret == -EBUSY)) { + mdelay(5); + ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands); + } + + return ret; +} + +/** + * smu_wbrf_event_handler - handle notify events + * + * @nb: notifier block + * @action: event type + * @_arg: event data + * + * Calls the relevant amdgpu function in response to a wbrf event + * notification from the kernel. + */ +static int smu_wbrf_event_handler(struct notifier_block *nb, + unsigned long action, void *_arg) +{ + struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier); + + switch (action) { + case WBRF_CHANGED: + schedule_delayed_work(&smu->wbrf_delayed_work, + msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE)); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +/** + * smu_wbrf_delayed_work_handler - callback on delayed work timer expired + * + * @work: struct work_struct pointer + * + * The event flood is over and the driver will consume the latest exclusion ranges. + */ +static void smu_wbrf_delayed_work_handler(struct work_struct *work) +{ + struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work); + + smu_wbrf_handle_exclusion_ranges(smu); +} + +/** + * smu_wbrf_support_check - check wbrf support + * + * @smu: smu_context pointer + * + * Verifies via the ACPI interface whether wbrf is supported.
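The compaction loop in the hunk above is easiest to check against the sample input from its own comment. A standalone userspace re-implementation of the same algorithm (illustrative only, not driver code) prints the expected {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...} result:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define NUM_RANGES 7

struct range {
	uint64_t start, end;
};

int main(void)
{
	/* Sample input from the kernel comment: holes and a duplicate. */
	struct range r[NUM_RANGES] = {
		{2400, 2500}, {0, 0}, {6882, 6962},
		{2400, 2500}, {0, 0}, {6117, 6189}, {0, 0},
	};
	uint64_t start, end;
	int n = NUM_RANGES, i, j;

	for (i = 0; i < n; i++) {
		start = r[i].start;
		end = r[i].end;

		/* fill an intermediate hole with the last valid entry */
		if (!start && !end) {
			for (j = n - 1; j > i; j--)
				if (r[j].start && r[j].end)
					break;
			if (j <= i)
				break;	/* no valid entry left */
			start = r[i].start = r[j].start;
			end = r[i].end = r[j].end;
			r[j].start = r[j].end = 0;
			n = j;
		}

		/* zero out duplicates of the current entry */
		for (j = i + 1; j < n; j++)
			if (r[j].start == start && r[j].end == end)
				r[j].start = r[j].end = 0;
	}

	for (i = 0; i < NUM_RANGES; i++)
		printf("(%" PRIu64 ", %" PRIu64 ")\n", r[i].start, r[i].end);
	return 0;
}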
+ */ +static void smu_wbrf_support_check(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf && + acpi_amd_wbrf_supported_consumer(adev->dev); + + if (smu->wbrf_supported) + dev_info(adev->dev, "RF interference mitigation is supported\n"); +} + +/** + * smu_wbrf_init - init driver wbrf support + * + * @smu: smu_context pointer + * + * Verifies the AMD ACPI interfaces and registers with the wbrf + * notifier chain if the wbrf feature is supported. + * Returns 0 on success, error on failure. + */ +static int smu_wbrf_init(struct smu_context *smu) +{ + int ret; + + if (!smu->wbrf_supported) + return 0; + + INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler); + + smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler; + ret = amd_wbrf_register_notifier(&smu->wbrf_notifier); + if (ret) + return ret; + + /* + * Some wifiband exclusion ranges may already be there + * before our driver loaded. Process them now to make sure + * our driver is aware of those exclusion ranges. + */ + schedule_delayed_work(&smu->wbrf_delayed_work, + msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE)); + + return 0; +} + +/** + * smu_wbrf_fini - tear down driver wbrf support + * + * @smu: smu_context pointer + * + * Unregisters with the wbrf notifier chain. + */ +static void smu_wbrf_fini(struct smu_context *smu) +{ + if (!smu->wbrf_supported) + return; + + amd_wbrf_unregister_notifier(&smu->wbrf_notifier); + + cancel_delayed_work_sync(&smu->wbrf_delayed_work); +} + static int smu_smc_hw_setup(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; @@ -1407,6 +1588,15 @@ static int smu_smc_hw_setup(struct smu_context *smu) if (ret) return ret; + /* Enable UclkShadow when wbrf is supported */ + if (smu->wbrf_supported) { + ret = smu_enable_uclk_shadow(smu, true); + if (ret) { + dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n"); + return ret; + } + } + /* * With SCPM enabled, these actions(and relevant messages) are * not needed and permitted. @@ -1505,6 +1695,15 @@ static int smu_smc_hw_setup(struct smu_context *smu) */ ret = smu_set_min_dcef_deep_sleep(smu, smu->smu_table.boot_values.dcefclk / 100); + if (ret) { + dev_err(adev->dev, "Error setting min deepsleep dcefclk\n"); + return ret; + } + + /* Init wbrf support. Properly setup the notifier */ + ret = smu_wbrf_init(smu); + if (ret) + dev_err(adev->dev, "Error during wbrf init call\n"); return ret; } @@ -1560,6 +1759,13 @@ static int smu_hw_init(void *handle) return ret; } + /* + * Check whether wbrf is supported. This needs to be done + * before SMU setup starts since part of SMU configuration + * relies on this.
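smu_wbrf_init() above wires every wbrf notification to a single delayed work item, which acts as a debounce: schedule_delayed_work() does not re-arm an already-pending work item, so a burst of notifications inside the pace window collapses into one handler run. A minimal hypothetical module sketch of that idiom, not code from this patch:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void debounce_handler(struct work_struct *work)
{
	pr_info("burst settled, handling latest state once\n");
}
static DECLARE_DELAYED_WORK(debounce_work, debounce_handler);

static void on_event(void)
{
	/* no-op if already queued: coalesces a burst into one run */
	schedule_delayed_work(&debounce_work, msecs_to_jiffies(10));
}

static int __init demo_init(void)
{
	int i;

	for (i = 0; i < 5; i++)
		on_event();	/* five events, one handler invocation */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&debounce_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");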
+ */ + smu_wbrf_support_check(smu); + if (smu->is_apu) { ret = smu_set_gfx_imu_enable(smu); if (ret) @@ -1726,6 +1932,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + smu_wbrf_fini(smu); + cancel_work_sync(&smu->throttling_logging_work); cancel_work_sync(&smu->interrupt_work); @@ -2980,19 +3188,17 @@ static int smu_set_xgmi_pstate(void *handle, return ret; } -static int smu_get_baco_capability(void *handle, bool *cap) +static bool smu_get_baco_capability(void *handle) { struct smu_context *smu = handle; - *cap = false; - if (!smu->pm_enabled) - return 0; + return false; - if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) - *cap = smu->ppt_funcs->baco_is_support(smu); + if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support) + return false; - return 0; + return smu->ppt_funcs->baco_is_support(smu); } static int smu_baco_set_state(void *handle, int state) @@ -3166,6 +3372,20 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) return smu->ppt_funcs->get_gpu_metrics(smu, table); } +static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics, + size_t size) +{ + struct smu_context *smu = handle; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (!smu->ppt_funcs->get_pm_metrics) + return -EOPNOTSUPP; + + return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size); +} + static int smu_enable_mgpu_fan_boost(void *handle) { struct smu_context *smu = handle; @@ -3307,6 +3527,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .set_df_cstate = smu_set_df_cstate, .set_xgmi_pstate = smu_set_xgmi_pstate, .get_gpu_metrics = smu_sys_get_gpu_metrics, + .get_pm_metrics = smu_sys_get_pm_metrics, .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index e8329d157b..66e84defd0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -22,6 +22,9 @@ #ifndef __AMDGPU_SMU_H__ #define __AMDGPU_SMU_H__ +#include <linux/acpi_amd_wbrf.h> +#include <linux/units.h> + #include "amdgpu.h" #include "kgd_pp_interface.h" #include "dm_pp_interface.h" @@ -253,6 +256,7 @@ struct smu_table { uint64_t mc_address; void *cpu_addr; struct amdgpu_bo *bo; + uint32_t version; }; enum smu_perf_level_designation { @@ -317,6 +321,7 @@ enum smu_table_id { SMU_TABLE_PACE, SMU_TABLE_ECCINFO, SMU_TABLE_COMBO_PPTABLE, + SMU_TABLE_WIFIBAND, SMU_TABLE_COUNT, }; @@ -469,6 +474,12 @@ struct stb_context { #define WORKLOAD_POLICY_MAX 7 +/* + * Configure wbrf event handling pace as there can be only one + * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms. + */ +#define SMU_WBRF_EVENT_HANDLING_PACE 10 + struct smu_context { struct amdgpu_device *adev; struct amdgpu_irq_src irq_source; @@ -568,6 +579,11 @@ struct smu_context { struct delayed_work swctf_delayed_work; enum pp_xgmi_plpd_mode plpd_mode; + + /* data structures for wbrf feature support */ + bool wbrf_supported; + struct notifier_block wbrf_notifier; + struct delayed_work wbrf_delayed_work; }; struct i2c_adapter; @@ -1252,6 +1268,15 @@ struct pptable_funcs { ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table); /** + * @get_pm_metrics: Get one snapshot of power management metrics from + * PMFW. 
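The snapshot described above is exposed to userspace through the pm_metrics sysfs attribute added in the amdgpu_pm.c hunks earlier in this section. A hedged userspace sketch that reads one sample and decodes the common header follows; the struct mirrors the fields smu_v13_0_6_get_pm_metrics() fills in further below, but the authoritative layout lives in kgd_pp_interface.h, and the card0 path is an assumption:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Assumed mirror of the driver's common header; verify against
 * kgd_pp_interface.h before relying on it. */
struct pm_metrics_header {
	uint16_t structure_size;
	uint16_t pad;
	uint32_t mp1_ip_discovery_version;
	uint32_t pmfw_version;
	uint32_t pmmetrics_version;
};

int main(void)
{
	uint8_t buf[4096];	/* max sample size per the kernel-doc above */
	struct pm_metrics_header hdr;
	FILE *f = fopen("/sys/class/drm/card0/device/pm_metrics", "rb");
	size_t n;

	if (!f) {
		perror("pm_metrics");
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	if (n < sizeof(hdr))
		return 1;
	memcpy(&hdr, buf, sizeof(hdr));
	printf("sample: %zu bytes, structure_size %u, pmfw 0x%08x, metrics v%u\n",
	       n, hdr.structure_size, hdr.pmfw_version, hdr.pmmetrics_version);
	return 0;
}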
+ * + * Return: Size of the metrics sample + */ + ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics, + size_t size); + + /** * @enable_mgpu_fan_boost: Enable multi-GPU fan boost. */ int (*enable_mgpu_fan_boost)(struct smu_context *smu); @@ -1364,6 +1389,22 @@ struct pptable_funcs { * @notify_rlc_state: Notify RLC power state to SMU. */ int (*notify_rlc_state)(struct smu_context *smu, bool en); + + /** + * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature + */ + bool (*is_asic_wbrf_supported)(struct smu_context *smu); + + /** + * @enable_uclk_shadow: Enable the uclk shadow feature on wbrf supported + */ + int (*enable_uclk_shadow)(struct smu_context *smu, bool enable); + + /** + * @set_wbrf_exclusion_ranges: notify SMU the wifi bands occupied + */ + int (*set_wbrf_exclusion_ranges)(struct smu_context *smu, + struct freq_band_range *exclusion_ranges); }; typedef enum { @@ -1490,6 +1531,17 @@ enum smu_baco_seq { __dst_size); \ }) +typedef struct { + uint16_t LowFreq; + uint16_t HighFreq; +} WifiOneBand_t; + +typedef struct { + uint32_t WifiBandEntryNum; + WifiOneBand_t WifiBandEntry[11]; + uint32_t MmHubPadding[8]; +} WifiBandEntryTable_t; + #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) int smu_get_power_limit(void *handle, uint32_t *limit, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h index 9dd1ed5b89..b114d14fc0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h @@ -1615,7 +1615,8 @@ typedef struct { #define TABLE_I2C_COMMANDS 9 #define TABLE_DRIVER_INFO 10 #define TABLE_ECCINFO 11 -#define TABLE_COUNT 12 +#define TABLE_WIFIBAND 12 +#define TABLE_COUNT 13 //IH Interupt ID #define IH_INTERRUPT_ID_TO_DRIVER 0xFE diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h index 62b7c0daff..8b1496f8ce 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h @@ -1605,7 +1605,8 @@ typedef struct { #define TABLE_I2C_COMMANDS 9 #define TABLE_DRIVER_INFO 10 #define TABLE_ECCINFO 11 -#define TABLE_COUNT 12 +#define TABLE_WIFIBAND 12 +#define TABLE_COUNT 13 //IH Interupt ID #define IH_INTERRUPT_ID_TO_DRIVER 0xFE diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 8f42771e1f..5bb7a63c06 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -24,11 +24,6 @@ #ifndef SMU14_DRIVER_IF_V14_0_0_H #define SMU14_DRIVER_IF_V14_0_0_H -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -#define PMFW_DRIVER_IF_VERSION 7 - typedef struct { int32_t value; uint32_t numFractionalBits; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h index e2ee855c77..e862d323ca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h @@ -138,10 +138,9 @@ #define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A #define 
PPSMC_MSG_SetPriorityDeltaGain 0x4B #define PPSMC_MSG_AllowIHHostInterrupt 0x4C - #define PPSMC_MSG_DALNotPresent 0x4E - -#define PPSMC_Message_Count 0x4F +#define PPSMC_MSG_EnableUCLKShadow 0x51 +#define PPSMC_Message_Count 0x52 //Debug Dump Message #define DEBUGSMC_MSG_TestMessage 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h index 6aaefca9b5..a6bf9cdd13 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h @@ -134,6 +134,7 @@ #define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A #define PPSMC_MSG_SetPriorityDeltaGain 0x4B #define PPSMC_MSG_AllowIHHostInterrupt 0x4C -#define PPSMC_Message_Count 0x4D +#define PPSMC_MSG_EnableUCLKShadow 0x51 +#define PPSMC_Message_Count 0x52 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 9dd47d9109..953a767613 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -259,7 +259,9 @@ __SMU_DUMMY_MAP(PowerUpUmsch), \ __SMU_DUMMY_MAP(PowerDownUmsch), \ __SMU_DUMMY_MAP(SetSoftMaxVpe), \ - __SMU_DUMMY_MAP(SetSoftMinVpe), + __SMU_DUMMY_MAP(SetSoftMinVpe), \ + __SMU_DUMMY_MAP(GetMetricsVersion), \ + __SMU_DUMMY_MAP(EnableUCLKShadow), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 95cb919718..fbd57fa1a0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -210,15 +210,8 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, - enum smu_baco_seq baco_seq); - bool smu_v13_0_baco_is_support(struct smu_context *smu); -enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu); - -int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); - int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); @@ -301,5 +294,9 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, int smu_v13_0_disable_pmfw_state(struct smu_context *smu); +int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable); + +int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, + struct freq_band_range *exclusion_ranges); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index a5b569976f..3f7463c1c1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -26,8 +26,8 @@ #include "amdgpu_smu.h" #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x6 #define FEATURE_MASK(feature) (1ULL << feature) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index ac04ed1e49..40ba7227cc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2411,8 +2411,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .set_azalia_d3_pme = 
smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = smu_v11_0_baco_enter, .baco_exit = smu_v11_0_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 0821018459..836b1df799 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3539,8 +3539,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = navi10_baco_enter, .baco_exit = navi10_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 6f37ca7a06..1f18b61884 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -4432,8 +4432,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = sienna_cichlid_baco_enter, .baco_exit = sienna_cichlid_baco_exit, .mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index e3579a8fd7..ca0d5a5b20 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1530,7 +1530,6 @@ static int aldebaran_i2c_control_init(struct smu_context *smu) smu_i2c->port = 0; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &aldebaran_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 68981086b5..c486182ff2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2201,7 +2201,7 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu, return ret; } -int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, +static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_baco_seq baco_seq) { struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -2223,33 +2223,14 @@ int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, return 0; } -bool smu_v13_0_baco_is_support(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - - if (amdgpu_sriov_vf(smu->adev) || - !smu_baco->platform_support) - return false; - - /* return true if ASIC is in BACO state already */ - if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; - - if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && - 
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; - - return true; -} - -enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) +static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; return smu_baco->state; } -int smu_v13_0_baco_set_state(struct smu_context *smu, +static int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -2283,24 +2264,60 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, return ret; } -int smu_v13_0_baco_enter(struct smu_context *smu) +bool smu_v13_0_baco_is_support(struct smu_context *smu) { - int ret = 0; + struct smu_baco_context *smu_baco = &smu->smu_baco; - ret = smu_v13_0_baco_set_state(smu, - SMU_BACO_STATE_ENTER); - if (ret) - return ret; + if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) + return false; + + /* return true if ASIC is in BACO state already */ + if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) + return true; - msleep(10); + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && + !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) + return false; - return ret; + return true; +} + +int smu_v13_0_baco_enter(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + int ret; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + return smu_v13_0_baco_set_armd3_sequence(smu, + (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + BACO_SEQ_BAMACO : BACO_SEQ_BACO); + } else { + ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); + if (!ret) + usleep_range(10000, 11000); + + return ret; + } } int smu_v13_0_baco_exit(struct smu_context *smu) { - return smu_v13_0_baco_set_state(smu, - SMU_BACO_STATE_EXIT); + struct amdgpu_device *adev = smu->adev; + int ret; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); + } + + if (!ret) + adev->gfx.is_poweron = false; + + return ret; } int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu) @@ -2492,3 +2509,51 @@ int smu_v13_0_disable_pmfw_state(struct smu_context *smu) return ret == 0 ? 0 : -EINVAL; } + +int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable) +{ + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL); +} + +int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, + struct freq_band_range *exclusion_ranges) +{ + WifiBandEntryTable_t wifi_bands; + int valid_entries = 0; + int ret, i; + + memset(&wifi_bands, 0, sizeof(wifi_bands)); + for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) { + if (!exclusion_ranges[i].start && !exclusion_ranges[i].end) + break; + + /* PMFW expects the inputs to be in MHz */ + wifi_bands.WifiBandEntry[valid_entries].LowFreq = + DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ); + wifi_bands.WifiBandEntry[valid_entries++].HighFreq = + DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ); + } + wifi_bands.WifiBandEntryNum = valid_entries; + + /* + * Per confirmation with the PMFW team, WifiBandEntryNum = 0 + * is a valid setting. + * + * Consider the scenarios below: + * - At first the wifi device adds an exclusion range e.g.
(2400,2500) to + the BIOS and our driver gets notified. We will set WifiBandEntryNum = 1 + and pass the WifiBandEntry (2400, 2500) to PMFW. + + - Later the wifi device removes the wifiband list added above and + our driver gets notified again. At this time, the driver will set + WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW. + + - PMFW may still need to do some uclk shadow update (e.g. switching + from shadow clock back to primary clock) on receiving this. + */ + ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true); + if (ret) + dev_warn(smu->adev->dev, "Failed to set wifiband!"); + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 9ac6c408d2..9c03296f92 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -169,6 +169,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0), + MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0), }; static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { @@ -253,6 +254,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { TAB_MAP(I2C_COMMANDS), TAB_MAP(ECCINFO), TAB_MAP(OVERDRIVE), + TAB_MAP(WIFIBAND), }; static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -498,6 +500,9 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND, + sizeof(WifiBandEntryTable_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); if (!smu_table->metrics_table) @@ -2544,16 +2549,19 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask = 1 << workload_type; - /* Add optimizations for SMU13.0.0. Reuse the power saving profile */ - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) && - ((smu->adev->pm.fw_version == 0x004e6601) || - (smu->adev->pm.fw_version >= 0x004e7400))) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); - if (workload_type >= 0) - workload_mask |= 1 << workload_type; + /* Add optimizations for SMU13.0.0/10.
Reuse the power saving profile */ + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { + if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && + ((smu->adev->pm.fw_version == 0x004e6601) || + (smu->adev->pm.fw_version >= 0x004e7300))) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + smu->adev->pm.fw_version >= 0x00504500)) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); + if (workload_type >= 0) + workload_mask |= 1 << workload_type; + } } return smu_cmn_send_smc_msg_with_param(smu, @@ -2562,38 +2570,6 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, NULL); } -static int smu_v13_0_0_baco_enter(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - struct amdgpu_device *adev = smu->adev; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) - return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? - BACO_SEQ_BAMACO : BACO_SEQ_BACO); - else - return smu_v13_0_baco_enter(smu); -} - -static int smu_v13_0_0_baco_exit(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - int ret; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { - /* Wait for PMFW handling for the Dstate change */ - usleep_range(10000, 11000); - ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); - } else { - ret = smu_v13_0_baco_exit(smu); - } - - if (!ret) - adev->gfx.is_poweron = false; - - return ret; -} - static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2724,7 +2700,6 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu) smu_i2c->port = i; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &smu_v13_0_0_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); @@ -2968,6 +2943,20 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu, return ret; } +static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 0): + return smu->smc_fw_version >= 0x004e6300; + case IP_VERSION(13, 0, 10): + return smu->smc_fw_version >= 0x00503300; + default: + return false; + } +} + static int smu_v13_0_0_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) @@ -3082,10 +3071,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .deep_sleep_control = smu_v13_0_deep_sleep_control, .gfx_ulv_control = smu_v13_0_gfx_ulv_control, .baco_is_support = smu_v13_0_baco_is_support, - .baco_get_state = smu_v13_0_baco_get_state, - .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_0_baco_enter, - .baco_exit = smu_v13_0_0_baco_exit, + .baco_enter = smu_v13_0_baco_enter, + .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, .mode1_reset = smu_v13_0_0_mode1_reset, .mode2_reset = smu_v13_0_0_mode2_reset, @@ -3097,6 +3084,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .gpo_control = smu_v13_0_gpo_control, .get_ecc_info = smu_v13_0_0_get_ecc_info, .notify_display_change = smu_v13_0_notify_display_change, + .is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check, + .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, 
+ .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index bb98156b2f..949131bd1e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && !adev->in_s0ix) + if (!en && !adev->in_s0ix) { + /* Add a GFX reset as a workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. + */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 58f63b7a72..78491b04df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -120,6 +120,7 @@ struct mca_ras_info { #define P2S_TABLE_ID_A 0x50325341 #define P2S_TABLE_ID_X 0x50325358 +// clang-format off static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -128,6 +129,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0), MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1), + MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), @@ -158,8 +160,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1), MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1), - MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0), - MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), + MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1), + MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0), MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), @@ -171,6 +173,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), }; +// clang-format on static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = { CLK_MAP(SOCCLK, PPCLK_SOCCLK), CLK_MAP(FCLK, PPCLK_FCLK), @@ -432,6 +435,41 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, return 0; } +static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, + void *metrics, size_t max_size) +{ + struct smu_table_context *smu_tbl_ctxt = &smu->smu_table; + uint32_t table_version = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].version; + uint32_t table_size =
smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].size; + struct amdgpu_pm_metrics *pm_metrics = metrics; + uint32_t pmfw_version; + int ret; + + if (!pm_metrics || !max_size) + return -EINVAL; + + if (max_size < (table_size + sizeof(pm_metrics->common_header))) + return -EOVERFLOW; + + /* Don't use cached metrics data */ + ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true); + if (ret) + return ret; + + smu_cmn_get_smc_version(smu, NULL, &pmfw_version); + + memset(&pm_metrics->common_header, 0, + sizeof(pm_metrics->common_header)); + pm_metrics->common_header.mp1_ip_discovery_version = + IP_VERSION(13, 0, 6); + pm_metrics->common_header.pmfw_version = pmfw_version; + pm_metrics->common_header.pmmetrics_version = table_version; + pm_metrics->common_header.structure_size = + sizeof(pm_metrics->common_header) + table_size; + + return pm_metrics->common_header.structure_size; +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; @@ -441,6 +479,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) (struct PPTable_t *)smu_table->driver_pptable; struct amdgpu_device *adev = smu->adev; int ret, i, retry = 100; + uint32_t table_version; /* Store one-time values in driver PPTable */ if (!pptable->Init) { @@ -459,6 +498,13 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) if (!retry) return -ETIME; + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion, + &table_version); + if (ret) + return ret; + smu_table->tables[SMU_TABLE_SMU_METRICS].version = + table_version; + pptable->MaxSocketPowerLimit = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); pptable->MaxGfxclkFrequency = @@ -1479,7 +1525,6 @@ static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable) if (smu->smc_fw_version < 0x554800) return 0; - amdgpu_ras_set_mca_debug_mode(smu->adev, enable); return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead, enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK, NULL); @@ -1893,7 +1938,6 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu) smu_i2c->port = i; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &smu_v13_0_6_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); @@ -2332,16 +2376,6 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, return ret; } -static int smu_v13_0_6_post_init(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - - if (!amdgpu_sriov_vf(adev) && adev->ras_enabled) - return smu_v13_0_6_mca_set_debug_mode(smu, false); - - return 0; -} - static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -2424,8 +2458,8 @@ static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info) { - uint64_t ipid = entry->regs[MCA_REG_IDX_IPID]; - uint32_t insthi; + u64 ipid = entry->regs[MCA_REG_IDX_IPID]; + u32 instidhi, instid; /* NOTE: All MCA IPID register share the same format, * so the driver can share the MCMP1 register header file. 
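The hunk that follows reworks this decode around a packed "Unified DieID". As a standalone illustration of that bit layout (SAASS: bit 4 is the socket high bit taken from InstanceIdLo[0], bits 3:2 the AID and bits 1:0 the socket low bits, both from InstanceIdHi), using example values rather than kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t instidhi = 0x7;	/* example: AID 1, socket low bits 0b11 */
	uint32_t instidlo = 0x1;	/* example: socket high bit set */

	/* Same arithmetic as the decode in the hunk below. */
	uint32_t aid = (instidhi >> 2) & 0x03;
	uint32_t socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);

	printf("aid=%u socket_id=%u\n", aid, socket_id);	/* aid=1 socket_id=7 */
	return 0;
}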
@@ -2434,9 +2468,15 @@ static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_ info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID); info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType); - insthi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi); - info->aid = ((insthi >> 2) & 0x03); - info->socket_id = insthi & 0x03; + /* + * Unified DieID Format: SAASS. A:AID, S:Socket. + * Unified DieID[4] = InstanceId[0] + * Unified DieID[0:3] = InstanceIdHi[0:3] + */ + instidhi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi); + instid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo); + info->aid = ((instidhi >> 2) & 0x03); + info->socket_id = ((instid & 0x1) << 2) | (instidhi & 0x03); } static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, @@ -2515,9 +2555,9 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct return 0; } - if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0)) + if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0)) *count = 1; - else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0)) + else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0)) *count = 1; return 0; @@ -2528,13 +2568,15 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st uint32_t *count) { u32 ext_error_code; + u32 err_cnt; ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]); + err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]); if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0) - *count = 1; + *count = err_cnt; else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6) - *count = 1; + *count = err_cnt; return 0; } @@ -2610,6 +2652,7 @@ static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct uint32_t instlo; instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo); + instlo &= GENMASK(31, 1); switch (instlo) { case 0x36430400: /* SMNAID XCD 0 */ case 0x38430400: /* SMNAID XCD 1 */ @@ -2629,6 +2672,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd uint32_t errcode, instlo; instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo); + instlo &= GENMASK(31, 1); if (instlo != 0x03b30400) return false; @@ -2851,6 +2895,13 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, return ret; } +static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu, + void *table) +{ + /* Support ecc info by default */ + return 0; +} + static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -2895,6 +2946,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, + .get_pm_metrics = smu_v13_0_6_get_pm_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported, @@ -2904,7 +2956,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .i2c_init = smu_v13_0_6_i2c_control_init, .i2c_fini = smu_v13_0_6_i2c_control_fini, .send_hbm_bad_pages_num =
smu_v13_0_6_smu_send_hbm_bad_page_num, - .post_init = smu_v13_0_6_post_init, + .get_ecc_info = smu_v13_0_6_get_ecc_info, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 402e0d184e..7318964f1f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -140,6 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), + MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { @@ -222,6 +223,7 @@ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { TAB_MAP(ACTIVITY_MONITOR_COEFF), [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, TAB_MAP(OVERDRIVE), + TAB_MAP(WIFIBAND), }; static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -512,6 +514,9 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu) AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND, + sizeof(WifiBandEntryTable_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); if (!smu_table->metrics_table) @@ -2513,38 +2518,6 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, return ret; } -static int smu_v13_0_7_baco_enter(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - struct amdgpu_device *adev = smu->adev; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) - return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? 
- BACO_SEQ_BAMACO : BACO_SEQ_BACO); - else - return smu_v13_0_baco_enter(smu); -} - -static int smu_v13_0_7_baco_exit(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - int ret; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { - /* Wait for PMFW handling for the Dstate change */ - usleep_range(10000, 11000); - ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); - } else { - ret = smu_v13_0_baco_exit(smu); - } - - if (!ret) - adev->gfx.is_poweron = false; - - return ret; -} - static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2565,6 +2538,11 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, NULL); } +static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu) +{ + return smu->smc_fw_version > 0x00524600; +} + static int smu_v13_0_7_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) @@ -2673,15 +2651,16 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, .baco_is_support = smu_v13_0_baco_is_support, - .baco_get_state = smu_v13_0_baco_get_state, - .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_7_baco_enter, - .baco_exit = smu_v13_0_7_baco_exit, + .baco_enter = smu_v13_0_baco_enter, + .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_7_set_mp1_state, .set_df_cstate = smu_v13_0_7_set_df_cstate, .gpo_control = smu_v13_0_gpo_control, + .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check, + .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, + .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, }; void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 01f2ab4567..6dae5ad74f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -224,7 +224,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) if (smu->is_apu) adev->pm.fw_version = smu_version; - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(14, 0, 2): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; break; @@ -233,7 +233,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) break; default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", - adev->ip_versions[MP1_HWIP][0]); + amdgpu_ip_version(adev, MP1_HWIP, 0)); smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV; break; } @@ -731,7 +731,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) int ret = 0; struct amdgpu_device *adev = smu->adev; - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 0): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 24a43374a7..9310c4758e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1088,6 +1088,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, 0, NULL); } +static int smu_14_0_0_get_dpm_table(struct smu_context 
*smu, struct dpm_clocks *clock_table) +{ + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + uint8_t idx; + + /* Only the clock information of SOC and VPE is copied, to provide the VPE DPM settings for use. */ + for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) { + clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0; + clock_table->SocClocks[idx].Vol = 0; + } + + for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) { + clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx]:0; + clock_table->VPEClocks[idx].Vol = 0; + } + + return 0; +} + static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .check_fw_version = smu_v14_0_check_fw_version, @@ -1118,6 +1137,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, + .get_dpm_clock_table = smu_14_0_0_get_dpm_table, }; static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index 64766ac69c..6f4d212607 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -98,6 +98,9 @@ #define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table) #define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu) #define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en) +#define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu) +#define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable) +#define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range) #endif #endif
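The smu_internal.h wrappers added above rely on the smu_ppt_funcs() dispatch pattern: call the per-ASIC hook when the pptable provides it, otherwise return the wrapper's default (false for is_asic_wbrf_supported, -EOPNOTSUPP for set_wbrf_exclusion_ranges). A hypothetical standalone mock of that pattern, not the kernel macro itself:

#include <stdbool.h>
#include <stdio.h>

struct ppt_funcs {
	bool (*is_asic_wbrf_supported)(void *smu);
};

struct smu {
	struct ppt_funcs *ppt_funcs;
};

/* Optional-callback dispatch: hook if present, default otherwise.
 * ## __VA_ARGS__ comma elision is a GNU extension (GCC/Clang). */
#define smu_ppt_call(smu, hook, def, ...) \
	((smu)->ppt_funcs && (smu)->ppt_funcs->hook ? \
	 (smu)->ppt_funcs->hook((smu), ##__VA_ARGS__) : (def))

static bool yes(void *smu)
{
	(void)smu;
	return true;
}

int main(void)
{
	struct ppt_funcs funcs = { .is_asic_wbrf_supported = yes };
	struct smu with = { &funcs }, without = { NULL };

	/* ASIC provides the hook: dispatches to it. */
	printf("with hook: %d\n",
	       smu_ppt_call(&with, is_asic_wbrf_supported, false));
	/* No hook: falls back to the wrapper's default. */
	printf("without hook: %d\n",
	       smu_ppt_call(&without, is_asic_wbrf_supported, false));
	return 0;
}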