From 9f0fc191371843c4fc000a226b0a26b6c059aacd Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 19:40:19 +0200
Subject: Merging upstream version 6.7.7.

Signed-off-by: Daniel Baumann
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  | 454 +++++++++--------
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  |   4 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c |  57 +--
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c  |  66 ++-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c |   1 +
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  | 129 ++---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c  |   5 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c    |  26 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c    | 561 +++++++++++----------
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h    |   2 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c   |   4 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h    |   2 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c     |  53 +-
 13 files changed, 748 insertions(+), 616 deletions(-)

(limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 861b5e45e2..272c27495e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -65,7 +65,6 @@
 #include "amdgpu_dm_debugfs.h"
 #endif
 #include "amdgpu_dm_psr.h"
-#include "amdgpu_dm_replay.h"
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
@@ -142,6 +141,9 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 
+#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+
 /* Number of bytes in PSP header for firmware.
*/ #define PSP_HEADER_BYTES 0x100 @@ -410,6 +412,7 @@ static void dm_pflip_high_irq(void *interrupt_params) struct amdgpu_crtc *amdgpu_crtc; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; + struct drm_device *dev = adev_to_drm(adev); unsigned long flags; struct drm_pending_vblank_event *e; u32 vpos, hpos, v_blank_start, v_blank_end; @@ -420,18 +423,17 @@ static void dm_pflip_high_irq(void *interrupt_params) /* IRQ could occur when in initial stage */ /* TODO work and BO cleanup */ if (amdgpu_crtc == NULL) { - DC_LOG_PFLIP("CRTC is null, returning.\n"); + drm_dbg_state(dev, "CRTC is null, returning.\n"); return; } spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { - DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", - amdgpu_crtc->pflip_status, - AMDGPU_FLIP_SUBMITTED, - amdgpu_crtc->crtc_id, - amdgpu_crtc); + drm_dbg_state(dev, + "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", + amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED, + amdgpu_crtc->crtc_id, amdgpu_crtc); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return; } @@ -497,9 +499,9 @@ static void dm_pflip_high_irq(void *interrupt_params) amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", - amdgpu_crtc->crtc_id, amdgpu_crtc, - vrr_active, (int) !e); + drm_dbg_state(dev, + "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", + amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e); } static void dm_vupdate_high_irq(void *interrupt_params) @@ -529,9 +531,9 @@ static void dm_vupdate_high_irq(void *interrupt_params) atomic64_set(&irq_params->previous_timestamp, vblank->time); } - DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", - acrtc->crtc_id, - vrr_active); + drm_dbg_vbl(drm_dev, + "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + vrr_active); /* Core vblank handling is done here after end of front-porch in * vrr mode, as vblank timestamping will give valid results @@ -582,8 +584,9 @@ static void dm_crtc_high_irq(void *interrupt_params) vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); - DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, - vrr_active, acrtc->dm_irq_params.active_planes); + drm_dbg_vbl(adev_to_drm(adev), + "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, + vrr_active, acrtc->dm_irq_params.active_planes); /** * Core vblank handling at start of front-porch is only possible @@ -1069,6 +1072,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) const struct firmware *dmub_fw = adev->dm.dmub_fw; struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; struct abm *abm = adev->dm.dc->res_pool->abm; + struct dc_context *ctx = adev->dm.dc->ctx; struct dmub_srv_hw_params hw_params; enum dmub_status status; const unsigned char *fw_inst_const, *fw_bss_data; @@ -1090,6 +1094,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return -EINVAL; } + /* initialize register offsets for ASICs with runtime initialization available */ + if (dmub_srv->hw_funcs.init_reg_offsets) + dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx); + status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error checking HW support for DMUB: %d\n", status); @@ -1165,9 +1173,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) for (i = 
0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 5, 0): hw_params.dpia_supported = true; hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; break; @@ -1246,9 +1255,11 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ agp_top = adev->gmc.agp_end >> 24; /* AGP aperture is disabled */ - if (agp_bot == agp_top) { + if (agp_bot > agp_top) { logical_addr_low = adev->gmc.fb_start >> 18; - if (adev->apu_flags & AMD_APU_IS_RAVEN2) + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the @@ -1260,7 +1271,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ logical_addr_high = adev->gmc.fb_end >> 18; } else { logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; - if (adev->apu_flags & AMD_APU_IS_RAVEN2) + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the @@ -1597,7 +1610,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 1, 0): switch (adev->dm.dmcub_fw_version) { case 0: /* development */ @@ -1616,40 +1629,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) break; } - switch (adev->asic_type) { - case CHIP_CARRIZO: - case CHIP_STONEY: - init_data.flags.gpu_vm_support = true; - break; - default: - switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(1, 0, 0): - case IP_VERSION(1, 0, 1): - /* enable S/G on PCO and RV2 */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || - (adev->apu_flags & AMD_APU_IS_PICASSO)) - init_data.flags.gpu_vm_support = true; - break; - case IP_VERSION(2, 1, 0): - case IP_VERSION(3, 0, 1): - case IP_VERSION(3, 1, 2): - case IP_VERSION(3, 1, 3): - case IP_VERSION(3, 1, 4): - case IP_VERSION(3, 1, 5): - case IP_VERSION(3, 1, 6): - init_data.flags.gpu_vm_support = true; - break; - default: - break; - } - break; - } - if (init_data.flags.gpu_vm_support && - (amdgpu_sg_display == 0)) + /* APU support S/G display by default except: + * ASICs before Carrizo, + * RAVEN1 (Users reported stability issue) + */ + + if (adev->asic_type < CHIP_CARRIZO) { init_data.flags.gpu_vm_support = false; + } else if (adev->asic_type == CHIP_RAVEN) { + if (adev->apu_flags & AMD_APU_IS_RAVEN) + init_data.flags.gpu_vm_support = false; + else + init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); + } else { + init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); + } - if (init_data.flags.gpu_vm_support) - adev->mode_info.gpu_vm_support = true; + adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; @@ -1670,7 +1666,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.seamless_boot_edp_requested = false; - if (check_seamless_boot_capability(adev)) { + if (amdgpu_device_seamless_boot_supported(adev)) { init_data.flags.seamless_boot_edp_requested = true; 
		init_data.flags.allow_seamless_boot_optimization = true;
 		DRM_INFO("Seamless boot condition check passed\n");
 	}
 
@@ -1680,6 +1676,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
 	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
+	init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
 
 	INIT_LIST_HEAD(&adev->dm.da_list);
 
@@ -1813,21 +1810,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
 			goto error;
 		}
-		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
-			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
-			goto error;
-		}
-		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
-			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
-			goto error;
-		}
-	}
-
-	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
-	 * It is expected that DMUB will resend any pending notifications at this point, for
-	 * example HPD from DPIA.
-	 */
-	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+		 * It is expected that DMUB will resend any pending notifications at this point. Note
+		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
+		 * align with the legacy interface initialization sequence. Connection status will be
+		 * proactively detected once in amdgpu_dm_initialize_drm_device.
+		 */
 		dc_enable_dmub_outbox(adev->dm.dc);
 
 		/* DPIA trace goes to dmesg logs only if outbox is enabled */
@@ -2003,7 +1991,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 			return 0;
 		break;
 	default:
-		switch (adev->ip_versions[DCE_HWIP][0]) {
+		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 		case IP_VERSION(2, 0, 2):
 		case IP_VERSION(2, 0, 3):
 		case IP_VERSION(2, 0, 0):
@@ -2019,6 +2007,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 		case IP_VERSION(3, 1, 6):
 		case IP_VERSION(3, 2, 0):
 		case IP_VERSION(3, 2, 1):
+		case IP_VERSION(3, 5, 0):
 			return 0;
 		default:
 			break;
@@ -2092,7 +2081,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 	enum dmub_status status;
 	int r;
 
-	switch (adev->ip_versions[DCE_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 	case IP_VERSION(2, 1, 0):
 		dmub_asic = DMUB_ASIC_DCN21;
 		break;
@@ -2127,6 +2116,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 	case IP_VERSION(3, 2, 1):
 		dmub_asic = DMUB_ASIC_DCN321;
 		break;
+	case IP_VERSION(3, 5, 0):
+		dmub_asic = DMUB_ASIC_DCN35;
+		break;
 	default:
 		/* ASIC doesn't support DMUB.
*/ return 0; @@ -2253,6 +2245,7 @@ static int dm_sw_fini(void *handle) if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); + kfree(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; } @@ -2462,7 +2455,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * therefore, this function apply to navi10/12/14 but not Renoir * * */ - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 0): break; @@ -2648,6 +2641,8 @@ static int dm_suspend(void *handle) WARN_ON(adev->dm.cached_state); adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + if (IS_ERR(adev->dm.cached_state)) + return PTR_ERR(adev->dm.cached_state); s3_handle_mst(adev_to_drm(adev), true); @@ -2656,11 +2651,12 @@ static int dm_suspend(void *handle) hpd_rx_irq_work_suspend(dm); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); return 0; } -struct amdgpu_dm_connector * +struct drm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc) { @@ -2673,7 +2669,7 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, crtc_from_state = new_con_state->crtc; if (crtc_from_state == crtc) - return to_amdgpu_dm_connector(connector); + return connector; } return NULL; @@ -2685,6 +2681,7 @@ static void emulated_link_detect(struct dc_link *link) struct display_sink_capability sink_caps = { 0 }; enum dc_edid_status edid_status; struct dc_context *dc_ctx = link->ctx; + struct drm_device *dev = adev_to_drm(dc_ctx->driver_context); struct dc_sink *sink = NULL; struct dc_sink *prev_sink = NULL; @@ -2734,7 +2731,7 @@ static void emulated_link_detect(struct dc_link *link) } default: - DC_ERROR("Invalid connector type! signal:%d\n", + drm_err(dev, "Invalid connector type! 
signal:%d\n", link->connector_signal); return; } @@ -2744,7 +2741,7 @@ static void emulated_link_detect(struct dc_link *link) sink = dc_sink_create(&sink_init_data); if (!sink) { - DC_ERROR("Failed to create sink!\n"); + drm_err(dev, "Failed to create sink!\n"); return; } @@ -2757,7 +2754,7 @@ static void emulated_link_detect(struct dc_link *link) sink); if (edid_status != EDID_OK) - DC_ERROR("Failed to read EDID"); + drm_err(dev, "Failed to read EDID\n"); } @@ -2776,7 +2773,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { - dm_error("Failed to allocate update bundle\n"); + drm_err(dm->ddev, "Failed to allocate update bundle\n"); goto cleanup; } @@ -2822,6 +2819,10 @@ static int dm_resume(void *handle) int i, r, j, ret; bool need_hotplug = false; + if (dm->dc->caps.ips_support) { + dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); + } + if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; @@ -2846,7 +2847,9 @@ static int dm_resume(void *handle) if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); + dc_resume(dm->dc); amdgpu_dm_irq_resume_early(adev); @@ -2895,6 +2898,7 @@ static int dm_resume(void *handle) } /* power on hardware */ + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); /* program HPD filter */ @@ -3224,7 +3228,8 @@ void amdgpu_dm_update_connector_after_detect( aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); if (!aconnector->timing_requested) - dm_error("failed to create aconnector->requested_timing\n"); + drm_err(dev, + "failed to create aconnector->requested_timing\n"); } drm_connector_update_edid_property(connector, aconnector->edid); @@ -3481,6 +3486,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev) int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + } + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -3506,10 +3519,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev) handle_hpd_rx_irq, (void *) aconnector); } - - if (adev->dm.hpd_rx_offload_wq) - adev->dm.hpd_rx_offload_wq[connector->index].aconnector = - aconnector; } } @@ -4338,7 +4347,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) enum dc_connection_type new_connection_type = dc_connection_none; const struct dc_plane_cap *plane; bool psr_feature_enabled = false; - bool replay_feature_enabled = false; int max_overlay = dm->dc->caps.max_slave_planes; dm->display_indexes_num = dm->dc->caps.max_streams; @@ -4410,7 +4418,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } /* Use Outbox interrupt */ - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): @@ -4420,6 +4428,7 @@ static int 
amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 5, 0): if (register_outbox_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -4427,12 +4436,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; default: DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", - adev->ip_versions[DCE_HWIP][0]); + amdgpu_ip_version(adev, DCE_HWIP, 0)); } /* Determine whether to enable PSR support by default. */ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): @@ -4440,6 +4449,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): psr_feature_enabled = true; break; default: @@ -4448,20 +4458,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } } - if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { - switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(3, 1, 4): - case IP_VERSION(3, 1, 5): - case IP_VERSION(3, 1, 6): - case IP_VERSION(3, 2, 0): - case IP_VERSION(3, 2, 1): - replay_feature_enabled = true; - break; - default: - replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; - break; - } - } /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -4493,6 +4489,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) link = dc_get_link_at_index(dm->dc, i); + if (dm->hpd_rx_offload_wq) + dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = + aconnector; + if (!dc_link_detect_connection_type(link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); @@ -4510,12 +4510,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) amdgpu_dm_update_connector_after_detect(aconnector); setup_backlight_device(dm, aconnector); - /* - * Disable psr if replay can be enabled - */ - if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector)) - psr_feature_enabled = false; - if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); @@ -4564,7 +4558,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } break; default: - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): case IP_VERSION(2, 0, 2): @@ -4582,6 +4576,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -4589,7 +4584,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; default: DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", - adev->ip_versions[DCE_HWIP][0]); + amdgpu_ip_version(adev, DCE_HWIP, 0)); goto fail; } break; @@ -4672,14 +4667,14 @@ static int dm_init_microcode(struct amdgpu_device *adev) char *fw_name_dmub; int r; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 1, 0): fw_name_dmub = FIRMWARE_RENOIR_DMUB; if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) fw_name_dmub = 
FIRMWARE_GREEN_SARDINE_DMUB; break; case IP_VERSION(3, 0, 0): - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; else fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; @@ -4712,13 +4707,14 @@ static int dm_init_microcode(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; break; + case IP_VERSION(3, 5, 0): + fw_name_dmub = FIRMWARE_DCN_35_DMUB; + break; default: /* ASIC doesn't support DMUB. */ return 0; } r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); - if (r) - DRM_ERROR("DMUB firmware loading failed: %d\n", r); return r; } @@ -4806,7 +4802,7 @@ static int dm_early_init(void *handle) break; default: - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 0, 2): case IP_VERSION(3, 0, 0): adev->mode_info.num_crtc = 6; @@ -4836,13 +4832,14 @@ static int dm_early_init(void *handle) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; break; default: DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", - adev->ip_versions[DCE_HWIP][0]); + amdgpu_ip_version(adev, DCE_HWIP, 0)); return -EINVAL; } break; @@ -5434,6 +5431,24 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, return color_space; } +static enum display_content_type +get_output_content_type(const struct drm_connector_state *connector_state) +{ + switch (connector_state->content_type) { + default: + case DRM_MODE_CONTENT_TYPE_NO_DATA: + return DISPLAY_CONTENT_TYPE_NO_DATA; + case DRM_MODE_CONTENT_TYPE_GRAPHICS: + return DISPLAY_CONTENT_TYPE_GRAPHICS; + case DRM_MODE_CONTENT_TYPE_PHOTO: + return DISPLAY_CONTENT_TYPE_PHOTO; + case DRM_MODE_CONTENT_TYPE_CINEMA: + return DISPLAY_CONTENT_TYPE_CINEMA; + case DRM_MODE_CONTENT_TYPE_GAME: + return DISPLAY_CONTENT_TYPE_GAME; + } +} + static bool adjust_colour_depth_from_display_info( struct dc_crtc_timing *timing_out, const struct drm_display_info *info) @@ -5497,6 +5512,7 @@ static void fill_stream_properties_from_drm_display_mode( && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if (drm_mode_is_420_also(info, mode_in) + && aconnector && aconnector->force_yuv420_output) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) @@ -5532,7 +5548,7 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->hdmi_vic = hv_frame.vic; } - if (is_freesync_video_mode(mode_in, aconnector)) { + if (aconnector && is_freesync_video_mode(mode_in, aconnector)) { timing_out->h_addressable = mode_in->hdisplay; timing_out->h_total = mode_in->htotal; timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; @@ -5568,6 +5584,7 @@ static void fill_stream_properties_from_drm_display_mode( } stream->output_color_space = get_output_color_space(timing_out, connector_state); + stream->content_type = get_output_content_type(connector_state); } static void fill_audio_info(struct audio_info *audio_info, @@ -6008,14 +6025,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, } static struct dc_stream_state * -create_stream_for_sink(struct amdgpu_dm_connector *aconnector, +create_stream_for_sink(struct drm_connector *connector, const struct 
drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream, int requested_bpc) { + struct amdgpu_dm_connector *aconnector = NULL; struct drm_display_mode *preferred_mode = NULL; - struct drm_connector *drm_connector; const struct drm_connector_state *con_state = &dm_state->base; struct dc_stream_state *stream = NULL; struct drm_display_mode mode; @@ -6034,20 +6051,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, drm_mode_init(&mode, drm_mode); memset(&saved_mode, 0, sizeof(saved_mode)); - if (aconnector == NULL) { - DRM_ERROR("aconnector is NULL!\n"); + if (connector == NULL) { + DRM_ERROR("connector is NULL!\n"); return stream; } - drm_connector = &aconnector->base; - - if (!aconnector->dc_sink) { - sink = create_fake_sink(aconnector); - if (!sink) - return stream; - } else { - sink = aconnector->dc_sink; - dc_sink_retain(sink); + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { + aconnector = NULL; + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_sink) { + sink = create_fake_sink(aconnector); + if (!sink) + return stream; + } else { + sink = aconnector->dc_sink; + dc_sink_retain(sink); + } } stream = dc_create_stream_for_sink(sink); @@ -6057,12 +6076,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, goto finish; } + /* We leave this NULL for writeback connectors */ stream->dm_stream_context = aconnector; stream->timing.flags.LTE_340MCSC_SCRAMBLE = - drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + connector->display_info.hdmi.scdc.scrambling.low_rates; - list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { + list_for_each_entry(preferred_mode, &connector->modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { native_mode_found = true; @@ -6071,7 +6091,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, } if (!native_mode_found) preferred_mode = list_first_entry_or_null( - &aconnector->base.modes, + &connector->modes, struct drm_display_mode, head); @@ -6085,12 +6105,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, * and the modelist may not be filled in time. 
*/ DRM_DEBUG_DRIVER("No preferred mode found\n"); - } else { + } else if (aconnector) { recalculate_timing = is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); + saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; drm_mode_copy(&mode, freesync_mode); + mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; } else { decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, scale); @@ -6108,18 +6130,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ if (!scale || mode_refresh != preferred_refresh) fill_stream_properties_from_drm_display_mode( - stream, &mode, &aconnector->base, con_state, NULL, + stream, &mode, connector, con_state, NULL, requested_bpc); else fill_stream_properties_from_drm_display_mode( - stream, &mode, &aconnector->base, con_state, old_stream, + stream, &mode, connector, con_state, old_stream, requested_bpc); + /* The rest isn't needed for writeback connectors */ + if (!aconnector) + goto finish; + if (aconnector->timing_changed) { - DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n", - __func__, - stream->timing.display_color_depth, - aconnector->timing_requested->display_color_depth); + drm_dbg(aconnector->base.dev, + "overriding timing for automated test, bpc %d, changing to %d\n", + stream->timing.display_color_depth, + aconnector->timing_requested->display_color_depth); stream->timing = *aconnector->timing_requested; } @@ -6132,7 +6158,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, fill_audio_info( &stream->audio_info, - drm_connector, + connector, sink); update_stream_signal(stream, sink); @@ -6422,12 +6448,25 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; struct edid *edid; + struct i2c_adapter *ddc; - if (!connector->edid_override) + if (dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; + + /* + * Note: drm_get_edid gets edid in the following order: + * 1) override EDID if set via edid_override debugfs, + * 2) firmware EDID if set via edid_firmware module parameter + * 3) regular DDC read. 
+ */ + edid = drm_get_edid(connector, ddc); + if (!edid) { + DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; + } - drm_edid_override_connector_update(&aconnector->base); - edid = aconnector->base.edid_blob_ptr->data; aconnector->edid = edid; /* Update emulated (virtual) sink's EDID */ @@ -6462,29 +6501,34 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { + struct drm_connector *connector = &aconnector->base; + struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; struct edid *edid; + struct i2c_adapter *ddc; - if (!aconnector->base.edid_blob_ptr) { - /* if connector->edid_override valid, pass - * it to edid_override to edid_blob_ptr - */ - - drm_edid_override_connector_update(&aconnector->base); - - if (!aconnector->base.edid_blob_ptr) { - DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", - aconnector->base.name); + if (dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; - aconnector->base.force = DRM_FORCE_OFF; - return; - } + /* + * Note: drm_get_edid gets edid in the following order: + * 1) override EDID if set via edid_override debugfs, + * 2) firmware EDID if set via edid_firmware module parameter + * 3) regular DDC read. + */ + edid = drm_get_edid(connector, ddc); + if (!edid) { + DRM_ERROR("No EDID found on connector: %s.\n", connector->name); + return; } - edid = (struct edid *) aconnector->base.edid_blob_ptr->data; + if (drm_detect_hdmi_monitor(edid)) + init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; aconnector->edid = edid; @@ -6595,7 +6639,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, enum dc_status dc_result = DC_OK; do { - stream = create_stream_for_sink(aconnector, drm_mode, + stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, requested_bpc); if (stream == NULL) { @@ -6603,6 +6647,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, break; } + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return stream; + dc_result = dc_validate_stream(adev->dm.dc, stream); if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); @@ -6772,6 +6819,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, new_crtc_state->mode_changed = true; } + if (new_con_state->content_type != old_con_state->content_type) { + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + new_crtc_state->mode_changed = true; + } + if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { struct dc_info_packet hdr_infopacket; @@ -7406,6 +7461,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, adev->mode_info.abm_level_property, 0); } + if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + /* Content Type is currently only implemented for HDMI. 
*/ + drm_connector_attach_content_type_property(&aconnector->base); + } + if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) drm_connector_attach_colorspace_property(&aconnector->base); @@ -7832,8 +7892,9 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc) /* Mark this event as consumed */ acrtc->base.state->event = NULL; - DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", - acrtc->crtc_id); + drm_dbg_state(acrtc->base.dev, + "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", + acrtc->crtc_id); } static void update_freesync_state_on_stream( @@ -8076,7 +8137,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { - dm_error("Failed to allocate update bundle\n"); + drm_err(dev, "Failed to allocate update bundle\n"); goto cleanup; } @@ -8664,7 +8725,9 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, status = dc_stream_get_status_from_state(dc_state, dm_new_crtc_state->stream); if (!status) - DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); + drm_err(dev, + "got no status for stream %p on acrtc%p\n", + dm_new_crtc_state->stream, acrtc); else acrtc->otg_inst = status->primary_otg_inst; } @@ -8698,6 +8761,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); + if (dm->dc->caps.ips_support) { + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + if (new_con_state->crtc && + new_con_state->crtc->state->active && + drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) { + dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); + break; + } + } + } + drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); @@ -9289,6 +9363,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * update changed items */ struct amdgpu_crtc *acrtc = NULL; + struct drm_connector *connector = NULL; struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; @@ -9298,15 +9373,17 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); - aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); + connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); + if (connector) + aconnector = to_amdgpu_dm_connector(connector); /* TODO This hack should go away */ - if (aconnector && enable) { + if (connector && enable) { /* Make sure fake sink is created in plug-in scenario */ drm_new_conn_state = drm_atomic_get_new_connector_state(state, - &aconnector->base); + connector); drm_old_conn_state = drm_atomic_get_old_connector_state(state, - &aconnector->base); + connector); if (IS_ERR(drm_new_conn_state)) { ret = PTR_ERR_OR_ZERO(drm_new_conn_state); @@ -9453,7 +9530,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * added MST connectors not found in existing crtc_state in the chained mode * TODO: need to dig out the root cause of that */ - if (!aconnector) + if (!connector) goto skip_modeset; if (modereset_required(new_crtc_state)) @@ -9496,7 +9573,7 @@ skip_modeset: * We 
want to do dc stream updates that do not require a * full modeset below. */ - if (!(enable && aconnector && new_crtc_state->active)) + if (!(enable && connector && new_crtc_state->active)) return 0; /* * Given above conditions, the dc state cannot be NULL because: @@ -9825,7 +9902,7 @@ static int dm_update_plane_state(struct dc *dc, /* Block top most plane from being a video plane */ if (plane->type == DRM_PLANE_TYPE_OVERLAY) { - if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) + if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) return -EINVAL; *is_top_most_overlay = false; @@ -10376,11 +10453,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); - if (ret) { - DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); - ret = -EINVAL; - goto fail; + if (dc_resource_is_dsc_encoding_supported(dc)) { + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); + if (ret) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto fail; + } } ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); @@ -10538,7 +10617,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, input->cea_total_length = total_length; memcpy(input->payload, data, length); - res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); + res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { DRM_ERROR("EDID CEA parser failed\n"); return false; @@ -10887,7 +10966,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, { #ifdef DM_CHECK_ADDR_0 if (address == 0) { - DC_ERR("invalid register write. address = 0"); + drm_err(adev_to_drm(ctx->driver_context), + "invalid register write. address = 0"); return; } #endif @@ -10901,7 +10981,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, u32 value; #ifdef DM_CHECK_ADDR_0 if (address == 0) { - DC_ERR("invalid register read; address = 0\n"); + drm_err(adev_to_drm(ctx->driver_context), + "invalid register read; address = 0\n"); return 0; } #endif @@ -11011,27 +11092,6 @@ int amdgpu_dm_process_dmub_set_config_sync( return ret; } -/* - * Check whether seamless boot is supported. - * - * So far we only support seamless boot on CHIP_VANGOGH. - * If everything goes well, we may consider expanding - * seamless boot to other ASICs. 
- */ -bool check_seamless_boot_capability(struct amdgpu_device *adev) -{ - switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(3, 0, 1): - if (!adev->mman.keep_stolen_vga_memory) - return true; - break; - default: - break; - } - - return false; -} - bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 9e4cc5eeda..3710f4d0f2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -825,8 +825,6 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned in int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index, struct set_config_cmd_payload *payload, enum set_config_status *operation_result); -bool check_seamless_boot_capability(struct amdgpu_device *adev); - struct dc_stream_state * create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, @@ -836,7 +834,7 @@ struct dc_stream_state * int dm_atomic_get_state(struct drm_atomic_state *state, struct dm_atomic_state **dm_state); -struct amdgpu_dm_connector * +struct drm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 97b7a0b8a1..d2834ad85a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -29,7 +29,6 @@ #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm_psr.h" -#include "amdgpu_dm_replay.h" #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_plane.h" #include "amdgpu_dm_trace.h" @@ -96,7 +95,7 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } -static void vblank_control_worker(struct work_struct *work) +static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) { struct vblank_control_work *vblank_work = container_of(work, struct vblank_control_work, work); @@ -124,12 +123,7 @@ static void vblank_control_worker(struct work_struct *work) * fill_dc_dirty_rects(). 
*/ if (vblank_work->stream && vblank_work->stream->link) { - /* - * Prioritize replay, instead of psr - */ - if (vblank_work->stream->link->replay_settings.replay_feature_enabled) - amdgpu_dm_replay_enable(vblank_work->stream, false); - else if (vblank_work->enable) { + if (vblank_work->enable) { if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && vblank_work->stream->link->psr_settings.psr_allow_active) amdgpu_dm_psr_disable(vblank_work->stream); @@ -138,7 +132,6 @@ static void vblank_control_worker(struct work_struct *work) #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) && #endif - vblank_work->stream->link->panel_config.psr.disallow_replay && vblank_work->acrtc->dm_irq_params.allow_psr_entry) { amdgpu_dm_psr_enable(vblank_work->stream); } @@ -151,7 +144,7 @@ static void vblank_control_worker(struct work_struct *work) kfree(vblank_work); } -static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) +static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); @@ -191,7 +184,7 @@ skip: if (!work) return -ENOMEM; - INIT_WORK(&work->work, vblank_control_worker); + INIT_WORK(&work->work, amdgpu_dm_crtc_vblank_control_worker); work->dm = dm; work->acrtc = acrtc; work->enable = enable; @@ -209,15 +202,15 @@ skip: int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc) { - return dm_set_vblank(crtc, true); + return amdgpu_dm_crtc_set_vblank(crtc, true); } void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc) { - dm_set_vblank(crtc, false); + amdgpu_dm_crtc_set_vblank(crtc, false); } -static void dm_crtc_destroy_state(struct drm_crtc *crtc, +static void amdgpu_dm_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dm_crtc_state *cur = to_dm_crtc_state(state); @@ -233,7 +226,7 @@ static void dm_crtc_destroy_state(struct drm_crtc *crtc, kfree(state); } -static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc) +static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *crtc) { struct dm_crtc_state *state, *cur; @@ -273,12 +266,12 @@ static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) kfree(crtc); } -static void dm_crtc_reset_state(struct drm_crtc *crtc) +static void amdgpu_dm_crtc_reset_state(struct drm_crtc *crtc) { struct dm_crtc_state *state; if (crtc->state) - dm_crtc_destroy_state(crtc, crtc->state); + amdgpu_dm_crtc_destroy_state(crtc, crtc->state); state = kzalloc(sizeof(*state), GFP_KERNEL); if (WARN_ON(!state)) @@ -298,12 +291,12 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) /* Implemented only the options currently available for the driver */ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { - .reset = dm_crtc_reset_state, + .reset = amdgpu_dm_crtc_reset_state, .destroy = amdgpu_dm_crtc_destroy, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = dm_crtc_duplicate_state, - .atomic_destroy_state = dm_crtc_destroy_state, + .atomic_duplicate_state = amdgpu_dm_crtc_duplicate_state, + .atomic_destroy_state = amdgpu_dm_crtc_destroy_state, .set_crc_source = amdgpu_dm_crtc_set_crc_source, .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, @@ -316,11 +309,11 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { #endif }; -static void 
dm_crtc_helper_disable(struct drm_crtc *crtc) +static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc) { } -static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) +static int amdgpu_dm_crtc_count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) { struct drm_atomic_state *state = new_crtc_state->state; struct drm_plane *plane; @@ -352,8 +345,8 @@ static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) return num_active; } -static void dm_update_crtc_active_planes(struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state) +static void amdgpu_dm_crtc_update_crtc_active_planes(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -364,18 +357,18 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc, return; dm_new_crtc_state->active_planes = - count_crtc_active_planes(new_crtc_state); + amdgpu_dm_crtc_count_crtc_active_planes(new_crtc_state); } -static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, +static bool amdgpu_dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } -static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_atomic_state *state) +static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); @@ -386,7 +379,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, trace_amdgpu_dm_crtc_atomic_check(crtc_state); - dm_update_crtc_active_planes(crtc, crtc_state); + amdgpu_dm_crtc_update_crtc_active_planes(crtc, crtc_state); if (WARN_ON(unlikely(!dm_crtc_state->stream && amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { @@ -429,9 +422,9 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { - .disable = dm_crtc_helper_disable, - .atomic_check = dm_crtc_helper_atomic_check, - .mode_fixup = dm_crtc_helper_mode_fixup, + .disable = amdgpu_dm_crtc_helper_disable, + .atomic_check = amdgpu_dm_crtc_helper_atomic_check, + .mode_fixup = amdgpu_dm_crtc_helper_mode_fixup, .get_scanout_position = amdgpu_crtc_get_scanout_position, }; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 7c21e21bcc..45c972f263 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -37,6 +37,7 @@ #include "link_hwss.h" #include "dc/dc_dmub_srv.h" #include "link/protocols/link_dp_capability.h" +#include "inc/hw/dchubbub.h" #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY #include "amdgpu_dm_psr.h" @@ -1201,6 +1202,35 @@ static int internal_display_show(struct seq_file *m, void *data) return 0; } +/* + * Returns the number of segments used if ODM Combine mode is enabled. 
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/odm_combine_segments + */ +static int odm_combine_segments_show(struct seq_file *m, void *unused) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + struct pipe_ctx *pipe_ctx = NULL; + int i, segments = -EOPNOTSUPP; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == link) + break; + } + + if (connector->status != connector_status_connected) + return -ENODEV; + + if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments) + pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments); + + seq_printf(m, "%d\n", segments); + return 0; +} + /* function description * * generic SDP message access for testing @@ -2713,6 +2743,7 @@ DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); DEFINE_SHOW_ATTRIBUTE(internal_display); +DEFINE_SHOW_ATTRIBUTE(odm_combine_segments); DEFINE_SHOW_ATTRIBUTE(psr_capability); DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector); DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status); @@ -2991,7 +3022,8 @@ static const struct { } connector_debugfs_entries[] = { {"force_yuv420_output", &force_yuv420_output_fops}, {"trigger_hotplug", &trigger_hotplug_debugfs_fops}, - {"internal_display", &internal_display_fops} + {"internal_display", &internal_display_fops}, + {"odm_combine_segments", &odm_combine_segments_fops} }; /* @@ -3605,6 +3637,36 @@ static int disable_hpd_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, disable_hpd_set, "%llu\n"); +/* + * Prints hardware capabilities. These are used for IGT testing. + */ +static int capabilities_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct dc *dc = adev->dm.dc; + bool mall_supported = dc->caps.mall_size_total; + bool subvp_supported = dc->caps.subvp_fw_processing_delay_us; + unsigned int mall_in_use = false; + unsigned int subvp_in_use = false; + + struct hubbub *hubbub = dc->res_pool->hubbub; + + if (hubbub->funcs->get_mall_en) + hubbub->funcs->get_mall_en(hubbub, &mall_in_use); + + if (dc->cap_funcs.get_subvp_en) + subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state); + + seq_printf(m, "mall supported: %s, enabled: %s\n", + mall_supported ? "yes" : "no", mall_in_use ? "yes" : "no"); + seq_printf(m, "sub-viewport supported: %s, enabled: %s\n", + subvp_supported ? "yes" : "no", subvp_in_use ? 
"yes" : "no"); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(capabilities); + /* * Temporary w/a to force sst sequence in M42D DP2 mst receiver * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst @@ -3798,6 +3860,8 @@ void dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file("amdgpu_mst_topology", 0444, root, adev, &mst_topo_fops); + debugfs_create_file("amdgpu_dm_capabilities", 0444, root, + adev, &capabilities_fops); debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, &dtn_log_fops); debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 20cfc5be21..b54d646a7c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -740,6 +740,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, if (dc->ctx->dce_version == DCN_VERSION_3_1 || dc->ctx->dce_version == DCN_VERSION_3_14 || dc->ctx->dce_version == DCN_VERSION_3_15 || + dc->ctx->dce_version == DCN_VERSION_3_5 || dc->ctx->dce_version == DCN_VERSION_3_16) hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index cf0834ae53..4fad9c478c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -210,37 +210,35 @@ void dm_helpers_dp_update_branch_info( {} static void dm_helpers_construct_old_payload( - struct dc_link *link, - int pbn_per_slot, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, struct drm_dp_mst_atomic_payload *new_payload, struct drm_dp_mst_atomic_payload *old_payload) { - struct link_mst_stream_allocation_table current_link_table = - link->mst_stream_alloc_table; - struct link_mst_stream_allocation *dc_alloc; - int i; + struct drm_dp_mst_atomic_payload *pos; + int pbn_per_slot = mst_state->pbn_div; + u8 next_payload_vc_start = mgr->next_start_slot; + u8 payload_vc_start = new_payload->vc_start_slot; + u8 allocated_time_slots; *old_payload = *new_payload; /* Set correct time_slots/PBN of old payload. 
* other fields (delete & dsc_enabled) in * struct drm_dp_mst_atomic_payload are don't care fields - * while calling drm_dp_remove_payload() + * while calling drm_dp_remove_payload_part2() */ - for (i = 0; i < current_link_table.stream_count; i++) { - dc_alloc = - ¤t_link_table.stream_allocations[i]; - - if (dc_alloc->vcp_id == new_payload->vcpi) { - old_payload->time_slots = dc_alloc->slot_count; - old_payload->pbn = dc_alloc->slot_count * pbn_per_slot; - break; - } + list_for_each_entry(pos, &mst_state->payloads, next) { + if (pos != new_payload && + pos->vc_start_slot > payload_vc_start && + pos->vc_start_slot < next_payload_vc_start) + next_payload_vc_start = pos->vc_start_slot; } - /* make sure there is an old payload*/ - ASSERT(i != current_link_table.stream_count); + allocated_time_slots = next_payload_vc_start - payload_vc_start; + old_payload->time_slots = allocated_time_slots; + old_payload->pbn = allocated_time_slots * pbn_per_slot; } /* @@ -269,21 +267,20 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); - - /* It's OK for this to fail */ new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); if (enable) { target_payload = new_payload; + /* It's OK for this to fail */ drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload); } else { /* construct old payload by VCPI*/ - dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div, - new_payload, &old_payload); + dm_helpers_construct_old_payload(mst_mgr, mst_state, + new_payload, &old_payload); target_payload = &old_payload; - drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload); + drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload); } /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or @@ -350,7 +347,7 @@ bool dm_helpers_dp_mst_send_payload_allocation( struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_atomic_payload *payload; + struct drm_dp_mst_atomic_payload *new_payload, old_payload; enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; int ret = 0; @@ -363,15 +360,20 @@ bool dm_helpers_dp_mst_send_payload_allocation( mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); - payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); + new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); if (!enable) { set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; clr_flag = MST_ALLOCATE_NEW_PAYLOAD; } - if (enable) - ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload); + if (enable) { + ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); + } else { + dm_helpers_construct_old_payload(mst_mgr, mst_state, + new_payload, &old_payload); + drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); + } if (ret) { amdgpu_dm_set_mst_status(&aconnector->mst_status, @@ -540,10 +542,8 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; - if (!aconnector) { - DC_LOG_DC("Failed to find connector for link!\n"); + if (!aconnector) return false; - } return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data, size) == size; @@ -663,7 +663,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux 
*aux, drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); } - DC_LOG_DC("%s: success = %d\n", __func__, success); + drm_dbg_dp(aux->drm_dev, "success = %d\n", success); return success; } @@ -672,7 +672,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) { unsigned char data[16] = {0}; - DC_LOG_DC("Start %s\n", __func__); + drm_dbg_dp(aux->drm_dev, "Start\n"); // Step 2 data[0] = 'P'; @@ -730,7 +730,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) return; - DC_LOG_DC("Done %s\n", __func__); + drm_dbg_dp(aux->drm_dev, "Done\n"); } /* MST Dock */ @@ -743,7 +743,8 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( { uint8_t ret = 0; - DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); + drm_dbg_dp(aux->drm_dev, + "Configure DSC to non-virtual dpcd synaptics\n"); if (enable) { /* When DSC is enabled on previous boot and reboot with the hub, @@ -781,7 +782,9 @@ bool dm_helpers_dp_write_dsc_enable( static const uint8_t DSC_DECODING = 0x01; static const uint8_t DSC_PASSTHROUGH = 0x02; - struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *aconnector = + (struct amdgpu_dm_connector *)stream->dm_stream_context; + struct drm_device *dev = aconnector->base.dev; struct drm_dp_mst_port *port; uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE; uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE; @@ -791,8 +794,6 @@ bool dm_helpers_dp_write_dsc_enable( return false; if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector->dsc_aux) return false; @@ -809,30 +810,34 @@ bool dm_helpers_dp_write_dsc_enable( ret = drm_dp_dpcd_write(port->passthrough_aux, DP_DSC_ENABLE, &enable_passthrough, 1); - DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", - ret); + drm_dbg_dp(dev, + "Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", + ret); } ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n", - (port->passthrough_aux) ? "remote RX" : - "virtual dpcd", - ret); + drm_dbg_dp(dev, + "Sent DSC decoding enable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); } else { ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n", - (port->passthrough_aux) ? "remote RX" : - "virtual dpcd", - ret); + drm_dbg_dp(dev, + "Sent DSC decoding disable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); if (port->passthrough_aux) { ret = drm_dp_dpcd_write(port->passthrough_aux, DP_DSC_ENABLE, &enable_passthrough, 1); - DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", - ret); + drm_dbg_dp(dev, + "Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", + ret); } } } @@ -840,10 +845,14 @@ bool dm_helpers_dp_write_dsc_enable( if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); + drm_dbg_dp(dev, + "Send DSC %s to SST RX\n", + enable_dsc ? 
"enable" : "disable"); } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); + drm_dbg_dp(dev, + "Send DSC %s to DP-HDMI PCON\n", + enable_dsc ? "enable" : "disable"); } } @@ -1117,6 +1126,7 @@ bool dm_helpers_dp_handle_test_pattern_request( struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; struct pipe_ctx *pipe_ctx = NULL; struct amdgpu_dm_connector *aconnector = link->priv; + struct drm_device *dev = aconnector->base.dev; int i; for (i = 0; i < MAX_PIPES; i++) { @@ -1194,12 +1204,12 @@ bool dm_helpers_dp_handle_test_pattern_request( && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) { - DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n", - __func__, - pipe_ctx->stream->timing.display_color_depth, - pipe_ctx->stream->timing.pixel_encoding, - requestColorDepth, - requestPixelEncoding); + drm_dbg(dev, + "original bpc %d pix encoding %d, changing to %d %d\n", + pipe_ctx->stream->timing.display_color_depth, + pipe_ctx->stream->timing.pixel_encoding, + requestColorDepth, + requestPixelEncoding); pipe_ctx->stream->timing.display_color_depth = requestColorDepth; pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding; @@ -1210,10 +1220,13 @@ bool dm_helpers_dp_handle_test_pattern_request( if (aconnector->timing_requested) *aconnector->timing_requested = pipe_ctx->stream->timing; else - DC_LOG_ERROR("%s: timing storage failed\n", __func__); + drm_err(dev, "timing storage failed\n"); } + pipe_ctx->stream->test_pattern.type = test_pattern; + pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; + dc_link_dp_set_test_pattern( (struct dc_link *) link, test_pattern, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 51467f132c..d595030c33 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -711,7 +711,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev, { bool st; enum dc_irq_source irq_source; - + struct dc *dc = adev->dm.dc; struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; if (!acrtc) { @@ -729,6 +729,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev, st = (state == AMDGPU_IRQ_STATE_ENABLE); + if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dc, false); + dc_interrupt_set(adev->dm.dc, irq_source, st); return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 10dd4cd6f5..602a2ab98a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -173,8 +173,9 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) if (dc_link->sink_count) dc_link_remove_remote_sink(dc_link, dc_sink); - DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", - dc_sink, dc_link->sink_count); + drm_dbg_dp(connector->dev, + "DM_MST: remove remote sink 0x%p, %d remaining\n", + dc_sink, dc_link->sink_count); dc_sink_release(dc_sink); aconnector->dc_sink = NULL; @@ -325,8 +326,10 @@ static int 
dm_dp_mst_get_modes(struct drm_connector *connector) return 0; } - DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", - dc_sink, aconnector->dc_link->sink_count); + drm_dbg_dp(connector->dev, + "DM_MST: add remote sink 0x%p, %d remaining\n", + dc_sink, + aconnector->dc_link->sink_count); dc_sink->priv = aconnector; aconnector->dc_sink = dc_sink; @@ -361,8 +364,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return 0; } - DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", - dc_sink, aconnector->dc_link->sink_count); + drm_dbg_dp(connector->dev, + "DM_MST: add remote sink 0x%p, %d remaining\n", + dc_sink, aconnector->dc_link->sink_count); dc_sink->priv = aconnector; /* dc_link_add_remote_sink returns a new reference */ @@ -481,8 +485,10 @@ dm_dp_mst_detect(struct drm_connector *connector, if (aconnector->dc_link->sink_count) dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); - DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", - aconnector->dc_link, aconnector->dc_link->sink_count); + drm_dbg_dp(connector->dev, + "DM_MST: remove remote sink 0x%p, %d remaining\n", + aconnector->dc_link, + aconnector->dc_link->sink_count); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; @@ -1494,14 +1500,16 @@ int pre_validate_dsc(struct drm_atomic_state *state, int ind = find_crtc_index_in_state_by_stream(state, stream); if (ind >= 0) { + struct drm_connector *connector; struct amdgpu_dm_connector *aconnector; struct drm_connector_state *drm_new_conn_state; struct dm_connector_state *dm_new_conn_state; struct dm_crtc_state *dm_old_crtc_state; - aconnector = + connector = amdgpu_dm_find_first_crtc_matching_connector(state, state->crtcs[ind].ptr); + aconnector = to_amdgpu_dm_connector(connector); drm_new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index cc74dd69ac..116121e647 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -139,7 +139,7 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state } } -static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) +static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) { if (!*mods) return; @@ -164,12 +164,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_ *size += 1; } -static bool modifier_has_dcc(uint64_t modifier) +static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier) { return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); } -static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) +static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier) { if (modifier == DRM_FORMAT_MOD_LINEAR) return 0; @@ -177,8 +177,8 @@ static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) return AMD_FMT_MOD_GET(TILE, modifier); } -static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, - uint64_t tiling_flags) +static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, + uint64_t tiling_flags) { /* Fill GFX8 params */ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { @@ -209,8 +209,8 @@ static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, 
AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); } -static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info) +static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info) { /* Fill GFX9 params */ tiling_info->gfx9.num_pipes = @@ -226,13 +226,13 @@ static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, tiling_info->gfx9.num_rb_per_se = adev->gfx.config.gb_addr_config_fields.num_rb_per_se; tiling_info->gfx9.shaderEnable = 1; - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; } -static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info, - uint64_t modifier) +static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info, + uint64_t modifier) { unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); @@ -241,7 +241,7 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev pipes_log2 = min(5u, mod_pipe_xor_bits); - fill_gfx9_tiling_info_from_device(adev, tiling_info); + amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); if (!IS_AMD_FMT_MOD(modifier)) return; @@ -258,13 +258,13 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev } } -static int validate_dcc(struct amdgpu_device *adev, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const union dc_tiling_info *tiling_info, - const struct dc_plane_dcc_param *dcc, - const struct dc_plane_address *address, - const struct plane_size *plane_size) +static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const union dc_tiling_info *tiling_info, + const struct dc_plane_dcc_param *dcc, + const struct dc_plane_address *address, + const struct plane_size *plane_size) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -303,23 +303,23 @@ static int validate_dcc(struct amdgpu_device *adev, return 0; } -static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, - const struct amdgpu_framebuffer *afb, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const struct plane_size *plane_size, - union dc_tiling_info *tiling_info, - struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address, - const bool force_disable_dcc) +static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct plane_size *plane_size, + union dc_tiling_info *tiling_info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + const bool force_disable_dcc) { const uint64_t modifier = afb->base.modifier; int ret = 0; - fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); - tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); + amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); + tiling_info->gfx9.swizzle = 
amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); - if (modifier_has_dcc(modifier) && !force_disable_dcc) { + if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) { uint64_t dcc_address = afb->address + afb->base.offsets[1]; bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); @@ -347,60 +347,64 @@ static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, address->grph.meta_addr.high_part = upper_32_bits(dcc_address); } - ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); + ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); if (ret) - drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); + drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); return ret; } -static void add_gfx10_1_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - - - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, 
AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + + /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } -static void add_gfx9_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pipe_xor_bits = min(8, pipes + @@ -421,163 +425,164 @@ static void add_gfx9_modifiers(const struct amdgpu_device *adev, */ if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + 
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); } /* * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. + * amdgpu_dm_plane_format_mod_supported. 
*/ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); } /* * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. + * amdgpu_dm_plane_format_mod_supported. */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } } -static void add_gfx10_3_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, 
AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); - - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, 
AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } -static void add_gfx11_modifiers(struct amdgpu_device *adev, +static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { int num_pipes = 0; @@ -628,21 +633,21 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev, AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); - add_modifier(mods, size, capacity, modifier_dcc_best); - add_modifier(mods, size, capacity, modifier_dcc_4k); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k); - add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); - add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); - add_modifier(mods, size, capacity, modifier_r_x); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); } -static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) +static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) { uint64_t size = 0, capacity = 128; *mods = NULL; @@ -654,36 +659,37 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); if (plane_type == DRM_PLANE_TYPE_CURSOR) { - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, 
DRM_FORMAT_MOD_LINEAR); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); return *mods ? 0 : -ENOMEM; } switch (adev->family) { case AMDGPU_FAMILY_AI: case AMDGPU_FAMILY_RV: - add_gfx9_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_NV: case AMDGPU_FAMILY_VGH: case AMDGPU_FAMILY_YC: case AMDGPU_FAMILY_GC_10_3_6: case AMDGPU_FAMILY_GC_10_3_7: - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) - add_gfx10_3_modifiers(adev, mods, &size, &capacity); + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) + amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity); else - add_gfx10_1_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_GC_11_0_0: case AMDGPU_FAMILY_GC_11_0_1: - add_gfx11_modifiers(adev, mods, &size, &capacity); + case AMDGPU_FAMILY_GC_11_5_0: + amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity); break; } - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); /* INVALID marks the end of the list. */ - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); if (!*mods) return -ENOMEM; @@ -691,9 +697,9 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty return 0; } -static int get_plane_formats(const struct drm_plane *plane, - const struct dc_plane_cap *plane_cap, - uint32_t *formats, int max_formats) +static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane, + const struct dc_plane_cap *plane_cap, + uint32_t *formats, int max_formats) { int i, num_formats = 0; @@ -817,22 +823,22 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, } if (adev->family >= AMDGPU_FAMILY_AI) { - ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, - rotation, plane_size, - tiling_info, dcc, - address, - force_disable_dcc); + ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, + rotation, plane_size, + tiling_info, dcc, + address, + force_disable_dcc); if (ret) return ret; } else { - fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); + amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); } return 0; } -static int dm_plane_helper_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) +static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct amdgpu_framebuffer *afb; struct drm_gem_object *obj; @@ -927,8 +933,8 @@ error_unlock: return r; } -static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; int r; @@ -948,7 +954,7 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, amdgpu_bo_unref(&rbo); } -static void get_min_max_dc_plane_scaling(struct drm_device *dev, +static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev, struct drm_framebuffer *fb, int *min_downscale, int *max_upscale) { @@ -1029,8 +1035,8 @@ int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, } /* Get min/max allowed scaling factors from plane caps. 
*/ - get_min_max_dc_plane_scaling(state->crtc->dev, fb, - &min_downscale, &max_upscale); + amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb, + &min_downscale, &max_upscale); /* * Convert to drm convention: 16.16 fixed point, instead of dc's * 1.0 == 1000. Also drm scaling is src/dst instead of dc's @@ -1068,8 +1074,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, * is to gesture the YouTube Android app into full screen * on ChromeOS. */ - if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || - (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || + (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) && (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) return -EINVAL; @@ -1100,8 +1106,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, /* Validate scaling per-format with DC plane caps */ if (state->plane && state->plane->dev && state->fb) { - get_min_max_dc_plane_scaling(state->plane->dev, state->fb, - &min_downscale, &max_upscale); + amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb, + &min_downscale, &max_upscale); } else { min_downscale = 250; max_upscale = 16000; @@ -1127,8 +1133,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, return 0; } -static int dm_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) +static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); @@ -1166,8 +1172,8 @@ static int dm_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } -static int dm_plane_atomic_async_check(struct drm_plane *plane, - struct drm_atomic_state *state) +static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane, + struct drm_atomic_state *state) { /* Only support async updates on cursor planes. 
*/ if (plane->type != DRM_PLANE_TYPE_CURSOR) @@ -1176,8 +1182,8 @@ static int dm_plane_atomic_async_check(struct drm_plane *plane, return 0; } -static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, - struct dc_cursor_position *position) +static int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, + struct dc_cursor_position *position) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); int x, y; @@ -1236,13 +1242,11 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, if (!plane->state->fb && !old_plane_state->fb) return; - DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", - __func__, - amdgpu_crtc->crtc_id, - plane->state->crtc_w, - plane->state->crtc_h); + drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", + amdgpu_crtc->crtc_id, plane->state->crtc_w, + plane->state->crtc_h); - ret = get_cursor_position(plane, crtc, &position); + ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); if (ret) return; @@ -1291,8 +1295,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, } } -static void dm_plane_atomic_async_update(struct drm_plane *plane, - struct drm_atomic_state *state) +static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); @@ -1316,14 +1320,14 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { - .prepare_fb = dm_plane_helper_prepare_fb, - .cleanup_fb = dm_plane_helper_cleanup_fb, - .atomic_check = dm_plane_atomic_check, - .atomic_async_check = dm_plane_atomic_async_check, - .atomic_async_update = dm_plane_atomic_async_update + .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, + .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, + .atomic_check = amdgpu_dm_plane_atomic_check, + .atomic_async_check = amdgpu_dm_plane_atomic_async_check, + .atomic_async_update = amdgpu_dm_plane_atomic_async_update }; -static void dm_drm_plane_reset(struct drm_plane *plane) +static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) { struct dm_plane_state *amdgpu_state = NULL; @@ -1337,8 +1341,7 @@ static void dm_drm_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); } -static struct drm_plane_state * -dm_drm_plane_duplicate_state(struct drm_plane *plane) +static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) { struct dm_plane_state *dm_plane_state, *old_dm_plane_state; @@ -1357,15 +1360,15 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane) return &dm_plane_state->base; } -static bool dm_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) +static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) { struct amdgpu_device *adev = drm_to_adev(plane->dev); const struct drm_format_info *info = drm_format_info(format); int i; - enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; + enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; if (!info) return false; @@ -1402,7 +1405,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, info->cpp[0] < 8) return false; - if (modifier_has_dcc(modifier)) { + if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { /* Per radeonsi comments 16/64 bpp are more 
complicated. */ if (info->cpp[0] != 4) return false; @@ -1416,8 +1419,8 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, return true; } -static void dm_drm_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) +static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) { struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); @@ -1431,10 +1434,10 @@ static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_helper_destroy, - .reset = dm_drm_plane_reset, - .atomic_duplicate_state = dm_drm_plane_duplicate_state, - .atomic_destroy_state = dm_drm_plane_destroy_state, - .format_mod_supported = dm_plane_format_mod_supported, + .reset = amdgpu_dm_plane_drm_plane_reset, + .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, + .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, + .format_mod_supported = amdgpu_dm_plane_format_mod_supported, }; int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, @@ -1448,10 +1451,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, unsigned int supported_rotations; uint64_t *modifiers = NULL; - num_formats = get_plane_formats(plane, plane_cap, formats, - ARRAY_SIZE(formats)); + num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats, + ARRAY_SIZE(formats)); - res = get_plane_modifiers(dm->adev, plane->type, &modifiers); + res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers); if (res) return res; @@ -1508,7 +1511,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, supported_rotations); - if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) && + if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) && plane->type != DRM_PLANE_TYPE_CURSOR) drm_plane_enable_fb_damage_clips(plane); @@ -1521,7 +1524,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, return 0; } -bool is_video_format(uint32_t format) +bool amdgpu_dm_plane_is_video_format(uint32_t format) { int i; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 930f1572f8..b51a6b57bd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -62,5 +62,5 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state bool *per_pixel_alpha, bool *pre_multiplied_alpha, bool *global_alpha, int *global_alpha_value); -bool is_video_format(uint32_t format); +bool amdgpu_dm_plane_is_video_format(uint32_t format); #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c index 32d3086c4c..5ce542b1f8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -91,7 +91,7 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac pr_config.replay_supported = true; pr_config.replay_power_opt_supported = 0; pr_config.replay_enable_option |= pr_enable_option_static_screen; - pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq ? 
true : false; + pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq; if (!pr_config.replay_timing_sync_supported) pr_config.replay_enable_option &= ~pr_enable_option_general_ui; @@ -99,7 +99,7 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; debug_flags->u32All = 0; debug_flags->bitfields.visual_confirm = - link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY ? true : false; + link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY; link->replay_settings.replay_feature_enabled = true; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index 0f580ea375..133af994a0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -37,7 +37,7 @@ #include #include #include -#include "dcn10/dcn10_optc.h" +#include "dc/inc/hw/optc.h" #include "dc/inc/core_types.h" diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index 172aa10a88..4ae4720535 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -60,11 +60,9 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth); */ inline void dc_assert_fp_enabled(void) { - int *pcpu, depth = 0; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - depth = *pcpu; - put_cpu_ptr(&fpu_recursion_depth); + depth = __this_cpu_read(fpu_recursion_depth); ASSERT(depth >= 1); } @@ -84,33 +82,28 @@ inline void dc_assert_fp_enabled(void) */ void dc_fpu_begin(const char *function_name, const int line) { - int *pcpu; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - *pcpu += 1; + WARN_ON_ONCE(!in_task()); + preempt_disable(); + depth = __this_cpu_inc_return(fpu_recursion_depth); - if (*pcpu == 1) { + if (depth == 1) { #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) - migrate_disable(); kernel_fpu_begin(); #elif defined(CONFIG_PPC64) - if (cpu_has_feature(CPU_FTR_VSX_COMP)) { - preempt_disable(); + if (cpu_has_feature(CPU_FTR_VSX_COMP)) enable_kernel_vsx(); - } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { - preempt_disable(); + else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) enable_kernel_altivec(); - } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { - preempt_disable(); + else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) enable_kernel_fp(); - } #elif defined(CONFIG_ARM64) kernel_neon_begin(); #endif } - TRACE_DCN_FPU(true, function_name, line, *pcpu); - put_cpu_ptr(&fpu_recursion_depth); + TRACE_DCN_FPU(true, function_name, line, depth); } /** @@ -125,30 +118,26 @@ void dc_fpu_begin(const char *function_name, const int line) */ void dc_fpu_end(const char *function_name, const int line) { - int *pcpu; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - *pcpu -= 1; - if (*pcpu <= 0) { + depth = __this_cpu_dec_return(fpu_recursion_depth); + if (depth == 0) { #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) kernel_fpu_end(); - migrate_enable(); #elif defined(CONFIG_PPC64) - if (cpu_has_feature(CPU_FTR_VSX_COMP)) { + if (cpu_has_feature(CPU_FTR_VSX_COMP)) disable_kernel_vsx(); - preempt_enable(); - } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { + else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) disable_kernel_altivec(); - preempt_enable(); - } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { + else if 
(!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) disable_kernel_fp(); - preempt_enable(); - } #elif defined(CONFIG_ARM64) kernel_neon_end(); #endif + } else { + WARN_ON_ONCE(depth < 0); } - TRACE_DCN_FPU(false, function_name, line, *pcpu); - put_cpu_ptr(&fpu_recursion_depth); + TRACE_DCN_FPU(false, function_name, line, depth); + preempt_enable(); } -- cgit v1.2.3
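The dm_helpers_construct_old_payload() rework near the top of this patch stops consulting DC's stream allocation table and instead derives the freed payload's size from the gap between its VC start slot and the next-highest allocated start slot, falling back to the manager's first free slot. A minimal userspace sketch of that slot arithmetic follows; the struct, helper name, and sample values are illustrative stand-ins, not the kernel's types:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for drm_dp_mst_atomic_payload: only the field
 * the slot computation needs. */
struct payload {
	uint8_t vc_start_slot;	/* first time slot owned by this stream */
};

/* Compute how many time slots a payload occupies: the distance from its
 * start slot to the nearest start slot above it, or to the first unused
 * slot (next_start_slot) if it is the topmost allocation. */
static uint8_t allocated_time_slots(const struct payload *table, int count,
				    const struct payload *target,
				    uint8_t next_start_slot)
{
	uint8_t next = next_start_slot;
	int i;

	for (i = 0; i < count; i++) {
		const struct payload *pos = &table[i];

		if (pos != target &&
		    pos->vc_start_slot > target->vc_start_slot &&
		    pos->vc_start_slot < next)
			next = pos->vc_start_slot;
	}
	return next - target->vc_start_slot;
}

int main(void)
{
	/* Three streams starting at slots 1, 9 and 25; slot 40 is the
	 * first free slot on the link. */
	struct payload table[] = { {1}, {9}, {25} };
	uint8_t slots = allocated_time_slots(table, 3, &table[1], 40);

	/* The stream at slot 9 runs up to slot 25, so it holds 16 slots;
	 * pbn_per_slot of 60 is an arbitrary example value. */
	printf("time_slots = %u, pbn = %u\n", (unsigned)slots,
	       (unsigned)slots * 60);
	return 0;
}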
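amdgpu_dm_plane_get_plane_modifiers() above builds the per-plane modifier list by repeatedly calling amdgpu_dm_plane_add_modifier() and terminates the list with DRM_FORMAT_MOD_INVALID. A userspace sketch of that append-and-terminate pattern follows; the capacity-doubling growth policy shown here is an assumption, since the helper's reallocation logic sits outside the hunks in this patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FMT_MOD_LINEAR  0ULL
#define FMT_MOD_INVALID 0x00ffffffffffffffULL	/* mirrors DRM_FORMAT_MOD_INVALID */

/* Append one modifier, doubling capacity when full (growth policy assumed). */
static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap,
			 uint64_t mod)
{
	if (!*mods)
		return;
	if (*size >= *cap) {
		uint64_t *grown = realloc(*mods, *cap * 2 * sizeof(**mods));

		if (!grown) {		/* allocation failed: drop the list */
			free(*mods);
			*mods = NULL;
			return;
		}
		*mods = grown;
		*cap *= 2;
	}
	(*mods)[(*size)++] = mod;
}

int main(void)
{
	uint64_t size = 0, cap = 2;
	uint64_t *mods = malloc(cap * sizeof(*mods));

	add_modifier(&mods, &size, &cap, 0x123);	/* illustrative tiled modifier */
	add_modifier(&mods, &size, &cap, FMT_MOD_LINEAR);
	add_modifier(&mods, &size, &cap, FMT_MOD_INVALID);	/* list terminator */

	for (uint64_t i = 0; mods && mods[i] != FMT_MOD_INVALID; i++)
		printf("modifier[%llu] = 0x%llx\n",
		       (unsigned long long)i, (unsigned long long)mods[i]);
	free(mods);
	return 0;
}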
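The dc_fpu.c changes above drop the get_cpu_ptr()/migrate_disable() pairs in favour of a single preempt_disable() plus a per-CPU recursion counter, so only the outermost dc_fpu_begin()/dc_fpu_end() pair actually saves and restores FPU state. A thread-local userspace analogue of that begin/end discipline follows; hw_fpu_begin()/hw_fpu_end() are hypothetical stand-ins for kernel_fpu_begin()/kernel_fpu_end():

#include <assert.h>
#include <stdio.h>

/* Per-thread stand-in for the per-CPU fpu_recursion_depth counter. */
static _Thread_local int fpu_recursion_depth;

/* Stand-ins for kernel_fpu_begin()/kernel_fpu_end(); in the kernel these
 * save and restore the CPU's floating-point register state. */
static void hw_fpu_begin(void) { puts("FPU state saved"); }
static void hw_fpu_end(void)   { puts("FPU state restored"); }

static void dc_fpu_begin(void)
{
	/* Only the outermost caller pays for the state save. */
	if (++fpu_recursion_depth == 1)
		hw_fpu_begin();
}

static void dc_fpu_end(void)
{
	int depth = --fpu_recursion_depth;

	if (depth == 0)
		hw_fpu_end();
	else
		assert(depth > 0);	/* unbalanced end, as WARN_ON_ONCE above */
}

int main(void)
{
	dc_fpu_begin();
	dc_fpu_begin();		/* nested: no second save */
	dc_fpu_end();
	dc_fpu_end();		/* outermost: restore */
	return 0;
}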